diff --git a/DO_OPENAPI_COMMIT_SHA.txt b/DO_OPENAPI_COMMIT_SHA.txt
index 3e045b5..11ac4a0 100644
--- a/DO_OPENAPI_COMMIT_SHA.txt
+++ b/DO_OPENAPI_COMMIT_SHA.txt
@@ -1 +1 @@
-cf0a60a
+ebfa95a
diff --git a/src/pydo/_client.py b/src/pydo/_client.py
index 0fe2140..0eee1c6 100644
--- a/src/pydo/_client.py
+++ b/src/pydo/_client.py
@@ -26,10 +26,12 @@
CdnOperations,
CertificatesOperations,
DatabasesOperations,
+ DedicatedInferencesOperations,
DomainsOperations,
DropletActionsOperations,
DropletsOperations,
FirewallsOperations,
+ FunctionsAccessKeyOperations,
FunctionsOperations,
GenaiOperations,
ImageActionsOperations,
@@ -49,6 +51,7 @@
ReservedIPsOperations,
ReservedIPv6ActionsOperations,
ReservedIPv6Operations,
+ SecurityOperations,
SizesOperations,
SnapshotsOperations,
SpacesKeyOperations,
@@ -597,6 +600,8 @@ class GeneratedClient: # pylint: disable=client-accepts-api-version-keyword,too
:vartype billing_insights: pydo.operations.BillingInsightsOperations
:ivar databases: DatabasesOperations operations
:vartype databases: pydo.operations.DatabasesOperations
+ :ivar dedicated_inferences: DedicatedInferencesOperations operations
+ :vartype dedicated_inferences: pydo.operations.DedicatedInferencesOperations
:ivar domains: DomainsOperations operations
:vartype domains: pydo.operations.DomainsOperations
:ivar droplets: DropletsOperations operations
@@ -609,6 +614,8 @@ class GeneratedClient: # pylint: disable=client-accepts-api-version-keyword,too
:vartype firewalls: pydo.operations.FirewallsOperations
:ivar functions: FunctionsOperations operations
:vartype functions: pydo.operations.FunctionsOperations
+ :ivar functions_access_key: FunctionsAccessKeyOperations operations
+ :vartype functions_access_key: pydo.operations.FunctionsAccessKeyOperations
:ivar images: ImagesOperations operations
:vartype images: pydo.operations.ImagesOperations
:ivar image_actions: ImageActionsOperations operations
@@ -641,6 +648,8 @@ class GeneratedClient: # pylint: disable=client-accepts-api-version-keyword,too
:vartype reserved_ipv6_actions: pydo.operations.ReservedIPv6ActionsOperations
:ivar byoip_prefixes: ByoipPrefixesOperations operations
:vartype byoip_prefixes: pydo.operations.ByoipPrefixesOperations
+ :ivar security: SecurityOperations operations
+ :vartype security: pydo.operations.SecurityOperations
:ivar sizes: SizesOperations operations
:vartype sizes: pydo.operations.SizesOperations
:ivar snapshots: SnapshotsOperations operations
@@ -746,6 +755,9 @@ def __init__(
self.databases = DatabasesOperations(
self._client, self._config, self._serialize, self._deserialize
)
+ self.dedicated_inferences = DedicatedInferencesOperations(
+ self._client, self._config, self._serialize, self._deserialize
+ )
self.domains = DomainsOperations(
self._client, self._config, self._serialize, self._deserialize
)
@@ -764,6 +776,9 @@ def __init__(
self.functions = FunctionsOperations(
self._client, self._config, self._serialize, self._deserialize
)
+ self.functions_access_key = FunctionsAccessKeyOperations(
+ self._client, self._config, self._serialize, self._deserialize
+ )
self.images = ImagesOperations(
self._client, self._config, self._serialize, self._deserialize
)
@@ -812,6 +827,9 @@ def __init__(
self.byoip_prefixes = ByoipPrefixesOperations(
self._client, self._config, self._serialize, self._deserialize
)
+ self.security = SecurityOperations(
+ self._client, self._config, self._serialize, self._deserialize
+ )
self.sizes = SizesOperations(
self._client, self._config, self._serialize, self._deserialize
)
diff --git a/src/pydo/aio/_client.py b/src/pydo/aio/_client.py
index 6935483..64d498b 100644
--- a/src/pydo/aio/_client.py
+++ b/src/pydo/aio/_client.py
@@ -26,10 +26,12 @@
CdnOperations,
CertificatesOperations,
DatabasesOperations,
+ DedicatedInferencesOperations,
DomainsOperations,
DropletActionsOperations,
DropletsOperations,
FirewallsOperations,
+ FunctionsAccessKeyOperations,
FunctionsOperations,
GenaiOperations,
ImageActionsOperations,
@@ -49,6 +51,7 @@
ReservedIPsOperations,
ReservedIPv6ActionsOperations,
ReservedIPv6Operations,
+ SecurityOperations,
SizesOperations,
SnapshotsOperations,
SpacesKeyOperations,
@@ -597,6 +600,8 @@ class GeneratedClient: # pylint: disable=client-accepts-api-version-keyword,too
:vartype billing_insights: pydo.aio.operations.BillingInsightsOperations
:ivar databases: DatabasesOperations operations
:vartype databases: pydo.aio.operations.DatabasesOperations
+ :ivar dedicated_inferences: DedicatedInferencesOperations operations
+ :vartype dedicated_inferences: pydo.aio.operations.DedicatedInferencesOperations
:ivar domains: DomainsOperations operations
:vartype domains: pydo.aio.operations.DomainsOperations
:ivar droplets: DropletsOperations operations
@@ -609,6 +614,8 @@ class GeneratedClient: # pylint: disable=client-accepts-api-version-keyword,too
:vartype firewalls: pydo.aio.operations.FirewallsOperations
:ivar functions: FunctionsOperations operations
:vartype functions: pydo.aio.operations.FunctionsOperations
+ :ivar functions_access_key: FunctionsAccessKeyOperations operations
+ :vartype functions_access_key: pydo.aio.operations.FunctionsAccessKeyOperations
:ivar images: ImagesOperations operations
:vartype images: pydo.aio.operations.ImagesOperations
:ivar image_actions: ImageActionsOperations operations
@@ -641,6 +648,8 @@ class GeneratedClient: # pylint: disable=client-accepts-api-version-keyword,too
:vartype reserved_ipv6_actions: pydo.aio.operations.ReservedIPv6ActionsOperations
:ivar byoip_prefixes: ByoipPrefixesOperations operations
:vartype byoip_prefixes: pydo.aio.operations.ByoipPrefixesOperations
+ :ivar security: SecurityOperations operations
+ :vartype security: pydo.aio.operations.SecurityOperations
:ivar sizes: SizesOperations operations
:vartype sizes: pydo.aio.operations.SizesOperations
:ivar snapshots: SnapshotsOperations operations
@@ -746,6 +755,9 @@ def __init__(
self.databases = DatabasesOperations(
self._client, self._config, self._serialize, self._deserialize
)
+ self.dedicated_inferences = DedicatedInferencesOperations(
+ self._client, self._config, self._serialize, self._deserialize
+ )
self.domains = DomainsOperations(
self._client, self._config, self._serialize, self._deserialize
)
@@ -764,6 +776,9 @@ def __init__(
self.functions = FunctionsOperations(
self._client, self._config, self._serialize, self._deserialize
)
+ self.functions_access_key = FunctionsAccessKeyOperations(
+ self._client, self._config, self._serialize, self._deserialize
+ )
self.images = ImagesOperations(
self._client, self._config, self._serialize, self._deserialize
)
@@ -812,6 +827,9 @@ def __init__(
self.byoip_prefixes = ByoipPrefixesOperations(
self._client, self._config, self._serialize, self._deserialize
)
+ self.security = SecurityOperations(
+ self._client, self._config, self._serialize, self._deserialize
+ )
self.sizes = SizesOperations(
self._client, self._config, self._serialize, self._deserialize
)
diff --git a/src/pydo/aio/operations/__init__.py b/src/pydo/aio/operations/__init__.py
index 4a74b7c..0c8c048 100644
--- a/src/pydo/aio/operations/__init__.py
+++ b/src/pydo/aio/operations/__init__.py
@@ -17,12 +17,14 @@
from ._operations import InvoicesOperations
from ._operations import BillingInsightsOperations
from ._operations import DatabasesOperations
+from ._operations import DedicatedInferencesOperations
from ._operations import DomainsOperations
from ._operations import DropletsOperations
from ._operations import DropletActionsOperations
from ._operations import AutoscalepoolsOperations
from ._operations import FirewallsOperations
from ._operations import FunctionsOperations
+from ._operations import FunctionsAccessKeyOperations
from ._operations import ImagesOperations
from ._operations import ImageActionsOperations
from ._operations import KubernetesOperations
@@ -39,6 +41,7 @@
from ._operations import ReservedIPv6Operations
from ._operations import ReservedIPv6ActionsOperations
from ._operations import ByoipPrefixesOperations
+from ._operations import SecurityOperations
from ._operations import SizesOperations
from ._operations import SnapshotsOperations
from ._operations import SpacesKeyOperations
@@ -70,12 +73,14 @@
"InvoicesOperations",
"BillingInsightsOperations",
"DatabasesOperations",
+ "DedicatedInferencesOperations",
"DomainsOperations",
"DropletsOperations",
"DropletActionsOperations",
"AutoscalepoolsOperations",
"FirewallsOperations",
"FunctionsOperations",
+ "FunctionsAccessKeyOperations",
"ImagesOperations",
"ImageActionsOperations",
"KubernetesOperations",
@@ -92,6 +97,7 @@
"ReservedIPv6Operations",
"ReservedIPv6ActionsOperations",
"ByoipPrefixesOperations",
+ "SecurityOperations",
"SizesOperations",
"SnapshotsOperations",
"SpacesKeyOperations",
diff --git a/src/pydo/aio/operations/_operations.py b/src/pydo/aio/operations/_operations.py
index e68d312..00b59bc 100644
--- a/src/pydo/aio/operations/_operations.py
+++ b/src/pydo/aio/operations/_operations.py
@@ -49,6 +49,7 @@
build_addons_patch_request,
build_apps_assign_alert_destinations_request,
build_apps_cancel_deployment_request,
+ build_apps_cancel_event_request,
build_apps_cancel_job_invocation_request,
build_apps_commit_rollback_request,
build_apps_create_deployment_request,
@@ -56,6 +57,8 @@
build_apps_create_rollback_request,
build_apps_delete_request,
build_apps_get_deployment_request,
+ build_apps_get_event_logs_request,
+ build_apps_get_event_request,
build_apps_get_exec_active_deployment_request,
build_apps_get_exec_request,
build_apps_get_health_request,
@@ -71,6 +74,7 @@
build_apps_get_request,
build_apps_list_alerts_request,
build_apps_list_deployments_request,
+ build_apps_list_events_request,
build_apps_list_instance_sizes_request,
build_apps_list_job_invocations_request,
build_apps_list_metrics_bandwidth_daily_request,
@@ -177,6 +181,19 @@
build_databases_update_region_request,
build_databases_update_sql_mode_request,
build_databases_update_user_request,
+ build_dedicated_inferences_create_request,
+ build_dedicated_inferences_create_tokens_request,
+ build_dedicated_inferences_delete_request,
+ build_dedicated_inferences_delete_tokens_request,
+ build_dedicated_inferences_get_accelerator_request,
+ build_dedicated_inferences_get_ca_request,
+ build_dedicated_inferences_get_gpu_model_config_request,
+ build_dedicated_inferences_get_request,
+ build_dedicated_inferences_list_accelerators_request,
+ build_dedicated_inferences_list_request,
+ build_dedicated_inferences_list_sizes_request,
+ build_dedicated_inferences_list_tokens_request,
+ build_dedicated_inferences_patch_request,
build_domains_create_record_request,
build_domains_create_request,
build_domains_delete_record_request,
@@ -221,6 +238,10 @@
build_firewalls_get_request,
build_firewalls_list_request,
build_firewalls_update_request,
+ build_functions_access_key_create_request,
+ build_functions_access_key_delete_request,
+ build_functions_access_key_list_request,
+ build_functions_access_key_update_request,
build_functions_create_namespace_request,
build_functions_create_trigger_request,
build_functions_delete_namespace_request,
@@ -522,6 +543,16 @@
build_reserved_ipv6_delete_request,
build_reserved_ipv6_get_request,
build_reserved_ipv6_list_request,
+ build_security_create_scan_request,
+ build_security_create_scan_rule_request,
+ build_security_create_suppression_request,
+ build_security_delete_suppression_request,
+ build_security_get_latest_scan_request,
+ build_security_get_scan_request,
+ build_security_list_scan_finding_affected_resources_request,
+ build_security_list_scans_request,
+ build_security_list_settings_request,
+ build_security_update_settings_plan_request,
build_sizes_list_request,
build_snapshots_delete_request,
build_snapshots_get_request,
@@ -75697,8 +75728,10 @@ async def get_logs_active_deployment(
* BUILD: Build-time logs
* DEPLOY: Deploy-time logs
* RUN: Live run-time logs
- * RUN_RESTARTED: Logs of crashed/restarted instances during runtime. Known values are:
- "UNSPECIFIED", "BUILD", "DEPLOY", "RUN", and "RUN_RESTARTED". Default value is "UNSPECIFIED".
+ * RUN_RESTARTED: Logs of crashed/restarted instances during runtime
+ * AUTOSCALE_EVENT: Logs of an autoscaling event (requires event_id). Known values are:
+ "UNSPECIFIED", "BUILD", "DEPLOY", "RUN", "RUN_RESTARTED", and "AUTOSCALE_EVENT". Default value
+ is "UNSPECIFIED".
:paramtype type: str
:keyword pod_connection_timeout: An optional time duration to wait if the underlying component
instance is not immediately available. Default: ``3m``. Default value is None.
@@ -85545,8 +85578,10 @@ async def get_logs(
* BUILD: Build-time logs
* DEPLOY: Deploy-time logs
* RUN: Live run-time logs
- * RUN_RESTARTED: Logs of crashed/restarted instances during runtime. Known values are:
- "UNSPECIFIED", "BUILD", "DEPLOY", "RUN", and "RUN_RESTARTED". Default value is "UNSPECIFIED".
+ * RUN_RESTARTED: Logs of crashed/restarted instances during runtime
+ * AUTOSCALE_EVENT: Logs of an autoscaling event (requires event_id). Known values are:
+ "UNSPECIFIED", "BUILD", "DEPLOY", "RUN", "RUN_RESTARTED", and "AUTOSCALE_EVENT". Default value
+ is "UNSPECIFIED".
:paramtype type: str
:keyword pod_connection_timeout: An optional time duration to wait if the underlying component
instance is not immediately available. Default: ``3m``. Default value is None.
@@ -85693,8 +85728,10 @@ async def get_logs_aggregate(
* BUILD: Build-time logs
* DEPLOY: Deploy-time logs
* RUN: Live run-time logs
- * RUN_RESTARTED: Logs of crashed/restarted instances during runtime. Known values are:
- "UNSPECIFIED", "BUILD", "DEPLOY", "RUN", and "RUN_RESTARTED". Default value is "UNSPECIFIED".
+ * RUN_RESTARTED: Logs of crashed/restarted instances during runtime
+ * AUTOSCALE_EVENT: Logs of an autoscaling event (requires event_id). Known values are:
+ "UNSPECIFIED", "BUILD", "DEPLOY", "RUN", "RUN_RESTARTED", and "AUTOSCALE_EVENT". Default value
+ is "UNSPECIFIED".
:paramtype type: str
:keyword pod_connection_timeout: An optional time duration to wait if the underlying component
instance is not immediately available. Default: ``3m``. Default value is None.
@@ -85971,8 +86008,10 @@ async def get_logs_active_deployment_aggregate(
* BUILD: Build-time logs
* DEPLOY: Deploy-time logs
* RUN: Live run-time logs
- * RUN_RESTARTED: Logs of crashed/restarted instances during runtime. Known values are:
- "UNSPECIFIED", "BUILD", "DEPLOY", "RUN", and "RUN_RESTARTED". Default value is "UNSPECIFIED".
+ * RUN_RESTARTED: Logs of crashed/restarted instances during runtime
+ * AUTOSCALE_EVENT: Logs of an autoscaling event (requires event_id). Known values are:
+ "UNSPECIFIED", "BUILD", "DEPLOY", "RUN", "RUN_RESTARTED", and "AUTOSCALE_EVENT". Default value
+ is "UNSPECIFIED".
:paramtype type: str
:keyword pod_connection_timeout: An optional time duration to wait if the underlying component
instance is not immediately available. Default: ``3m``. Default value is None.
@@ -86731,6 +86770,5301 @@ async def get_job_invocation_logs(
return cast(JSON, deserialized) # type: ignore
+ @distributed_trace_async
+ async def list_events(
+ self,
+ app_id: str,
+ *,
+ page: int = 1,
+ per_page: int = 20,
+ event_types: Optional[List[str]] = None,
+ **kwargs: Any
+ ) -> JSON:
+ # pylint: disable=line-too-long
+ """List App Events.
+
+ List all events for an app, including deployments and autoscaling events.
+
+ :param app_id: The app ID. Required.
+ :type app_id: str
+ :keyword page: Which 'page' of paginated results to return. Default value is 1.
+ :paramtype page: int
+ :keyword per_page: Number of items returned per page. Default value is 20.
+ :paramtype per_page: int
+ :keyword event_types: Filter events by event type. Default value is None.
+ :paramtype event_types: list[str]
+ :return: JSON object
+ :rtype: JSON
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # response body for status code(s): 200
+ response == {
+ "events": [
+ {
+ "autoscaling": {
+ "components": {
+ "str": {
+ "from": 0, # Optional. The number of
+ replicas before scaling.
+ "to": 0, # Optional. The number of
+ replicas after scaling.
+ "triggering_metric": "str" #
+ Optional. The metric that triggered the scale change. Known
+ values are "cpu", "requests_per_second", "request_duration".
+ For inactivity sleep, "scale_from_zero" and "scale_to_zero"
+ are used.
+ }
+ },
+ "phase": "str" # Optional. The current phase of the
+ autoscaling event. Known values are: "UNKNOWN", "PENDING",
+ "IN_PROGRESS", "SUCCEEDED", "FAILED", and "CANCELED".
+ },
+ "created_at": "2020-02-20 00:00:00", # Optional. When the
+ event was created.
+ "deployment": {
+ "cause": "str", # Optional. What caused this
+ deployment to be created.
+ "cloned_from": "str", # Optional. The ID of a
+ previous deployment that this deployment was cloned from.
+ "created_at": "2020-02-20 00:00:00", # Optional. The
+ creation time of the deployment.
+ "functions": [
+ {
+ "name": "str", # Optional. The name
+ of this functions component.
+ "namespace": "str", # Optional. The
+ namespace where the functions are deployed.
+ "source_commit_hash": "str" #
+ Optional. The commit hash of the repository that was used to
+ build this functions component.
+ }
+ ],
+ "id": "str", # Optional. The ID of the deployment.
+ "jobs": [
+ {
+ "name": "str", # Optional. The name
+ of this job.
+ "source_commit_hash": "str" #
+ Optional. The commit hash of the repository that was used to
+ build this job.
+ }
+ ],
+ "phase": "UNKNOWN", # Optional. Default value is
+ "UNKNOWN". Known values are: "UNKNOWN", "PENDING_BUILD", "BUILDING",
+ "PENDING_DEPLOY", "DEPLOYING", "ACTIVE", "SUPERSEDED", "ERROR", and
+ "CANCELED".
+ "phase_last_updated_at": "2020-02-20 00:00:00", #
+ Optional. When the deployment phase was last updated.
+ "progress": {
+ "error_steps": 0, # Optional. Number of
+ unsuccessful steps.
+ "pending_steps": 0, # Optional. Number of
+ pending steps.
+ "running_steps": 0, # Optional. Number of
+ currently running steps.
+ "steps": [
+ {
+ "component_name": "str", #
+ Optional. The component name that this step is associated
+ with.
+ "ended_at": "2020-02-20
+ 00:00:00", # Optional. The end time of this step.
+ "message_base": "str", #
+ Optional. The base of a human-readable description of the
+ step intended to be combined with the component name for
+ presentation. For example: ``message_base`` = "Building
+ service" ``component_name`` = "api".
+ "name": "str", # Optional.
+ The name of this step.
+ "reason": {
+ "code": "str", #
+ Optional. The error code.
+ "message": "str" #
+ Optional. The error message.
+ },
+ "started_at": "2020-02-20
+ 00:00:00", # Optional. The start time of this step.
+ "status": "UNKNOWN", #
+ Optional. Default value is "UNKNOWN". Known values are:
+ "UNKNOWN", "PENDING", "RUNNING", "ERROR", and "SUCCESS".
+ "steps": [
+ {} # Optional. Child
+ steps of this step.
+ ]
+ }
+ ],
+ "success_steps": 0, # Optional. Number of
+ successful steps.
+ "summary_steps": [
+ {
+ "component_name": "str", #
+ Optional. The component name that this step is associated
+ with.
+ "ended_at": "2020-02-20
+ 00:00:00", # Optional. The end time of this step.
+ "message_base": "str", #
+ Optional. The base of a human-readable description of the
+ step intended to be combined with the component name for
+ presentation. For example: ``message_base`` = "Building
+ service" ``component_name`` = "api".
+ "name": "str", # Optional.
+ The name of this step.
+ "reason": {
+ "code": "str", #
+ Optional. The error code.
+ "message": "str" #
+ Optional. The error message.
+ },
+ "started_at": "2020-02-20
+ 00:00:00", # Optional. The start time of this step.
+ "status": "UNKNOWN", #
+ Optional. Default value is "UNKNOWN". Known values are:
+ "UNKNOWN", "PENDING", "RUNNING", "ERROR", and "SUCCESS".
+ "steps": [
+ {} # Optional. Child
+ steps of this step.
+ ]
+ }
+ ],
+ "total_steps": 0 # Optional. Total number of
+ steps.
+ },
+ "services": [
+ {
+ "name": "str", # Optional. The name
+ of this service.
+ "source_commit_hash": "str" #
+ Optional. The commit hash of the repository that was used to
+ build this service.
+ }
+ ],
+ "spec": {
+ "name": "str", # The name of the app. Must
+ be unique across all apps in the same account. Required.
+ "databases": [
+ {
+ "name": "str", # The
+ database's name. The name must be unique across all
+ components within the same app and cannot use capital
+ letters. Required.
+ "cluster_name": "str", #
+ Optional. The name of the underlying DigitalOcean DBaaS
+ cluster. This is required for production databases. For
+ dev databases, if cluster_name is not set, a new cluster
+ will be provisioned.
+ "db_name": "str", #
+ Optional. The name of the MySQL or PostgreSQL database to
+ configure.
+ "db_user": "str", #
+ Optional. The name of the MySQL or PostgreSQL user to
+ configure.
+ "engine": "UNSET", #
+ Optional. Default value is "UNSET". * MYSQL: MySQL * PG:
+ PostgreSQL * REDIS: Caching * MONGODB: MongoDB * KAFKA:
+ Kafka * OPENSEARCH: OpenSearch * VALKEY: ValKey. Known
+ values are: "UNSET", "MYSQL", "PG", "REDIS", "MONGODB",
+ "KAFKA", "OPENSEARCH", and "VALKEY".
+ "production": bool, #
+ Optional. Whether this is a production or dev database.
+ "version": "str" # Optional.
+ The version of the database engine.
+ }
+ ],
+ "disable_edge_cache": False, # Optional.
+ Default value is False. .. role:: raw-html-m2r(raw) :format:
+ html If set to ``true``"" , the app will **not** be cached at
+ the edge (CDN). Enable this option if you want to manage CDN
+ configuration yourself"u2014whether by using an external CDN
+ provider or by handling static content and caching within your
+ app. This setting is also recommended for apps that require
+ real-time data or serve dynamic content, such as those using
+ Server-Sent Events (SSE) over GET, or hosting an MCP (Model
+ Context Protocol) Server that utilizes SSE.""
+ :raw-html-m2r:`
` **Note:** This feature is not available for
+ static site components."" :raw-html-m2r:`
` For more
+ information, see `Disable CDN Cache
+ `_.
+ "disable_email_obfuscation": False, #
+ Optional. Default value is False. If set to ``true``"" , email
+ addresses in the app will not be obfuscated. This is useful for
+ apps that require email addresses to be visible (in the HTML
+ markup).
+ "domains": [
+ {
+ "domain": "str", # The
+ hostname for the domain. Required.
+ "minimum_tls_version": "str",
+ # Optional. The minimum version of TLS a client
+ application can use to access resources for the domain.
+ Must be one of the following values wrapped within
+ quotations: ``"1.2"`` or ``"1.3"``. Known values are:
+ "1.2" and "1.3".
+ "type": "UNSPECIFIED", #
+ Optional. Default value is "UNSPECIFIED". * DEFAULT: The
+ default ``.ondigitalocean.app`` domain assigned to this
+ app * PRIMARY: The primary domain for this app that is
+ displayed as the default in the control panel, used in
+ bindable environment variables, and any other places that
+ reference an app's live URL. Only one domain may be set
+ as primary. * ALIAS: A non-primary domain. Known values
+ are: "UNSPECIFIED", "DEFAULT", "PRIMARY", and "ALIAS".
+ "wildcard": bool, #
+ Optional. Indicates whether the domain includes all
+ sub-domains, in addition to the given domain.
+ "zone": "str" # Optional.
+ Optional. If the domain uses DigitalOcean DNS and you
+ would like App Platform to automatically manage it for
+ you, set this to the name of the domain on your account.
+ For example, If the domain you are adding is
+ ``app.domain.com``"" , the zone could be ``domain.com``.
+ }
+ ],
+ "egress": {
+ "type": "AUTOASSIGN" # Optional.
+ Default value is "AUTOASSIGN". The app egress type. Known
+ values are: "AUTOASSIGN" and "DEDICATED_IP".
+ },
+ "enhanced_threat_control_enabled": False, #
+ Optional. Default value is False. If set to ``true``"" ,
+ suspicious requests will go through additional security checks to
+ help mitigate layer 7 DDoS attacks.
+ "functions": [
+ {
+ "name": "str", # The name.
+ Must be unique across all components within the same app.
+ Required.
+ "alerts": [
+ {
+ "disabled":
+ bool, # Optional. Is the alert disabled?.
+ "operator":
+ "UNSPECIFIED_OPERATOR", # Optional. Default
+ value is "UNSPECIFIED_OPERATOR". Known values
+ are: "UNSPECIFIED_OPERATOR", "GREATER_THAN", and
+ "LESS_THAN".
+ "rule":
+ "UNSPECIFIED_RULE", # Optional. Default value is
+ "UNSPECIFIED_RULE". Known values are:
+ "UNSPECIFIED_RULE", "CPU_UTILIZATION",
+ "MEM_UTILIZATION", "RESTART_COUNT",
+ "DEPLOYMENT_FAILED", "DEPLOYMENT_LIVE",
+ "DOMAIN_FAILED", "DOMAIN_LIVE",
+ "AUTOSCALE_FAILED", "AUTOSCALE_SUCCEEDED",
+ "FUNCTIONS_ACTIVATION_COUNT",
+ "FUNCTIONS_AVERAGE_DURATION_MS",
+ "FUNCTIONS_ERROR_RATE_PER_MINUTE",
+ "FUNCTIONS_AVERAGE_WAIT_TIME_MS",
+ "FUNCTIONS_ERROR_COUNT", and
+ "FUNCTIONS_GB_RATE_PER_SECOND".
+ "value": 0.0,
+ # Optional. Threshold value for alert.
+ "window":
+ "UNSPECIFIED_WINDOW" # Optional. Default value
+ is "UNSPECIFIED_WINDOW". Known values are:
+ "UNSPECIFIED_WINDOW", "FIVE_MINUTES",
+ "TEN_MINUTES", "THIRTY_MINUTES", and "ONE_HOUR".
+ }
+ ],
+ "bitbucket": {
+ "branch": "str", #
+ Optional. The name of the branch to use.
+ "deploy_on_push":
+ bool, # Optional. Whether to automatically deploy
+ new commits made to the repo.
+ "repo": "str" #
+ Optional. The name of the repo in the format
+ owner/repo. Example: ``digitalocean/sample-golang``.
+ },
+ "cors": {
+ "allow_credentials":
+ bool, # Optional. Whether browsers should expose the
+ response to the client-side JavaScript code when the
+ request"u2019s credentials mode is include. This
+ configures the ``Access-Control-Allow-Credentials``
+ header.
+ "allow_headers": [
+ "str" #
+ Optional. The set of allowed HTTP request
+ headers. This configures the
+ ``Access-Control-Allow-Headers`` header.
+ ],
+ "allow_methods": [
+ "str" #
+ Optional. The set of allowed HTTP methods. This
+ configures the ``Access-Control-Allow-Methods``
+ header.
+ ],
+ "allow_origins": [
+ {
+ "exact": "str", # Optional. Exact string
+ match. Only 1 of ``exact``"" , ``prefix``"" ,
+ or ``regex`` must be set.
+ "prefix": "str", # Optional. Prefix-based
+ match. Only 1 of ``exact``"" , ``prefix``"" ,
+ or ``regex`` must be set.
+ "regex": "str" # Optional. RE2 style
+ regex-based match. Only 1 of ``exact``"" ,
+ ``prefix``"" , or ``regex`` must be set. For
+ more information about RE2 syntax, see:
+ https://github.com/google/re2/wiki/Syntax.
+ }
+ ],
+ "expose_headers": [
+ "str" #
+ Optional. The set of HTTP response headers that
+ browsers are allowed to access. This configures
+ the ``Access-Control-Expose-Headers`` header.
+ ],
+ "max_age": "str" #
+ Optional. An optional duration specifying how long
+ browsers can cache the results of a preflight
+ request. This configures the
+ ``Access-Control-Max-Age`` header.
+ },
+ "envs": [
+ {
+ "key": "str",
+ # The variable name. Required.
+ "scope":
+ "RUN_AND_BUILD_TIME", # Optional. Default value
+ is "RUN_AND_BUILD_TIME". * RUN_TIME: Made
+ available only at run-time * BUILD_TIME: Made
+ available only at build-time *
+ RUN_AND_BUILD_TIME: Made available at both build
+ and run-time. Known values are: "UNSET",
+ "RUN_TIME", "BUILD_TIME", and
+ "RUN_AND_BUILD_TIME".
+ "type":
+ "GENERAL", # Optional. Default value is
+ "GENERAL". * GENERAL: A plain-text environment
+ variable * SECRET: A secret encrypted environment
+ variable. Known values are: "GENERAL" and
+ "SECRET".
+ "value":
+ "str" # Optional. The value. If the type is
+ ``SECRET``"" , the value will be encrypted on
+ first submission. On following submissions, the
+ encrypted value should be used.
+ }
+ ],
+ "git": {
+ "branch": "str", #
+ Optional. The name of the branch to use.
+ "repo_clone_url":
+ "str" # Optional. The clone URL of the repo.
+ Example:
+ ``https://github.com/digitalocean/sample-golang.git``.
+ },
+ "github": {
+ "branch": "str", #
+ Optional. The name of the branch to use.
+ "deploy_on_push":
+ bool, # Optional. Whether to automatically deploy
+ new commits made to the repo.
+ "repo": "str" #
+ Optional. The name of the repo in the format
+ owner/repo. Example: ``digitalocean/sample-golang``.
+ },
+ "gitlab": {
+ "branch": "str", #
+ Optional. The name of the branch to use.
+ "deploy_on_push":
+ bool, # Optional. Whether to automatically deploy
+ new commits made to the repo.
+ "repo": "str" #
+ Optional. The name of the repo in the format
+ owner/repo. Example: ``digitalocean/sample-golang``.
+ },
+ "log_destinations": [
+ {
+ "name":
+ "str", # Required.
+ "datadog": {
+ "api_key": "str", # Datadog API key.
+ Required.
+ "endpoint": "str" # Optional. Datadog HTTP
+ log intake endpoint.
+ },
+ "logtail": {
+ "token": "str" # Optional. Logtail token.
+ },
+ "open_search": {
+ "basic_auth": {
+ "password": "str", # Optional. Password
+ for user defined in User. Is required
+ when ``endpoint`` is set. Cannot be set
+ if using a DigitalOcean DBaaS OpenSearch
+ cluster.
+ "user": "str" # Optional. Username to
+ authenticate with. Only required when
+ ``endpoint`` is set. Defaults to
+ ``doadmin`` when ``cluster_name`` is set.
+ },
+ "cluster_name": "str", # Optional. The name
+ of a DigitalOcean DBaaS OpenSearch cluster to
+ use as a log forwarding destination. Cannot
+ be specified if ``endpoint`` is also
+ specified.
+ "endpoint": "str", # Optional. OpenSearch
+ API Endpoint. Only HTTPS is supported.
+ Format:
+ https://:code:``::code:``. Cannot
+ be specified if ``cluster_name`` is also
+ specified.
+ "index_name": "logs" # Optional. Default
+ value is "logs". The index name to use for
+ the logs. If not set, the default index name
+ is "logs".
+ },
+ "papertrail":
+ {
+ "endpoint": "str" # Papertrail syslog
+ endpoint. Required.
+ }
+ }
+ ],
+ "routes": [
+ {
+ "path":
+ "str", # Optional. (Deprecated - Use Ingress
+ Rules instead). An HTTP path prefix. Paths must
+ start with / and must be unique across all
+ components within an app.
+ "preserve_path_prefix": bool # Optional. An
+ optional flag to preserve the path that is
+ forwarded to the backend service. By default, the
+ HTTP request path will be trimmed from the left
+ when forwarded to the component. For example, a
+ component with ``path=/api`` will have requests
+ to ``/api/list`` trimmed to ``/list``. If this
+ value is ``true``"" , the path will remain
+ ``/api/list``.
+ }
+ ],
+ "source_dir": "str" #
+ Optional. An optional path to the working directory to
+ use for the build. For Dockerfile builds, this will be
+ used as the build context. Must be relative to the root
+ of the repo.
+ }
+ ],
+ "ingress": {
+ "rules": [
+ {
+ "component": {
+ "name":
+ "str", # The name of the component to route to.
+ Required.
+ "preserve_path_prefix": "str", # Optional. An
+ optional flag to preserve the path that is
+ forwarded to the backend service. By default, the
+ HTTP request path will be trimmed from the left
+ when forwarded to the component. For example, a
+ component with ``path=/api`` will have requests
+ to ``/api/list`` trimmed to ``/list``. If this
+ value is ``true``"" , the path will remain
+ ``/api/list``. Note: this is not applicable for
+ Functions Components and is mutually exclusive
+ with ``rewrite``.
+ "rewrite":
+ "str" # Optional. An optional field that will
+ rewrite the path of the component to be what is
+ specified here. By default, the HTTP request path
+ will be trimmed from the left when forwarded to
+ the component. For example, a component with
+ ``path=/api`` will have requests to ``/api/list``
+ trimmed to ``/list``. If you specified the
+ rewrite to be ``/v1/``"" , requests to
+ ``/api/list`` would be rewritten to ``/v1/list``.
+ Note: this is mutually exclusive with
+ ``preserve_path_prefix``.
+ },
+ "cors": {
+ "allow_credentials": bool, # Optional. Whether
+ browsers should expose the response to the
+ client-side JavaScript code when the
+ request"u2019s credentials mode is include. This
+ configures the
+ ``Access-Control-Allow-Credentials`` header.
+ "allow_headers": [
+ "str"
+ # Optional. The set of allowed HTTP request
+ headers. This configures the
+ ``Access-Control-Allow-Headers`` header.
+ ],
+ "allow_methods": [
+ "str"
+ # Optional. The set of allowed HTTP methods.
+ This configures the
+ ``Access-Control-Allow-Methods`` header.
+ ],
+ "allow_origins": [
+ {
+                                    "exact": "str", # Optional. Exact string
+                                      match. Only 1 of ``exact``,
+                                      ``prefix``, or ``regex`` must be set.
+                                    "prefix": "str", # Optional.
+                                      Prefix-based match. Only 1 of ``exact``,
+                                      ``prefix``, or ``regex`` must be
+                                      set.
+                                    "regex": "str" # Optional. RE2 style
+                                      regex-based match. Only 1 of ``exact``,
+                                      ``prefix``, or ``regex`` must be
+                                      set. For more information about RE2
+                                      syntax, see:
+                                      https://github.com/google/re2/wiki/Syntax.
+ }
+ ],
+ "expose_headers": [
+ "str"
+ # Optional. The set of HTTP response headers
+ that browsers are allowed to access. This
+ configures the
+ ``Access-Control-Expose-Headers`` header.
+ ],
+ "max_age":
+ "str" # Optional. An optional duration
+ specifying how long browsers can cache the
+ results of a preflight request. This configures
+ the ``Access-Control-Max-Age`` header.
+ },
+ "match": {
+ "authority":
+ {
+ "exact": "str" # Required.
+ },
+ "path": {
+ "prefix": "str" # Prefix-based match. For
+                                      example, ``/api`` will match ``/api``,
+                                      ``/api/``, and any nested paths such as
+ ``/api/v1/endpoint``. Required.
+ }
+ },
+ "redirect": {
+ "authority":
+ "str", # Optional. The authority/host to
+ redirect to. This can be a hostname or IP
+ address. Note: use ``port`` to set the port.
+ "port": 0, #
+ Optional. The port to redirect to.
+ "redirect_code": 0, # Optional. The redirect
+ code to use. Defaults to ``302``. Supported
+ values are 300, 301, 302, 303, 304, 307, 308.
+ "scheme":
+ "str", # Optional. The scheme to redirect to.
+ Supported values are ``http`` or ``https``.
+ Default: ``https``.
+ "uri": "str"
+ # Optional. An optional URI path to redirect to.
+ Note: if this is specified the whole URI of the
+ original request will be overwritten to this
+ value, irrespective of the original request URI
+ being matched.
+ }
+ }
+ ]
+ },
+ "jobs": [
+ {
+ "autoscaling": {
+ "max_instance_count":
+ 0, # Optional. The maximum amount of instances for
+ this component. Must be more than min_instance_count.
+ "metrics": {
+ "cpu": {
+ "percent": 80 # Optional. Default value is
+ 80. The average target CPU utilization for
+ the component.
+ }
+ },
+ "min_instance_count":
+ 0 # Optional. The minimum amount of instances for
+ this component. Must be less than max_instance_count.
+ },
+ "bitbucket": {
+ "branch": "str", #
+ Optional. The name of the branch to use.
+ "deploy_on_push":
+ bool, # Optional. Whether to automatically deploy
+ new commits made to the repo.
+ "repo": "str" #
+ Optional. The name of the repo in the format
+ owner/repo. Example: ``digitalocean/sample-golang``.
+ },
+ "build_command": "str", #
+ Optional. An optional build command to run while building
+ this component from source.
+ "dockerfile_path": "str", #
+ Optional. The path to the Dockerfile relative to the root
+ of the repo. If set, it will be used to build this
+ component. Otherwise, App Platform will attempt to build
+ it using buildpacks.
+ "environment_slug": "str", #
+ Optional. An environment slug describing the type of this
+ app. For a full list, please refer to `the product
+ documentation
+ `_.
+ "envs": [
+ {
+ "key": "str",
+ # The variable name. Required.
+ "scope":
+ "RUN_AND_BUILD_TIME", # Optional. Default value
+ is "RUN_AND_BUILD_TIME". * RUN_TIME: Made
+ available only at run-time * BUILD_TIME: Made
+ available only at build-time *
+ RUN_AND_BUILD_TIME: Made available at both build
+ and run-time. Known values are: "UNSET",
+ "RUN_TIME", "BUILD_TIME", and
+ "RUN_AND_BUILD_TIME".
+ "type":
+ "GENERAL", # Optional. Default value is
+ "GENERAL". * GENERAL: A plain-text environment
+ variable * SECRET: A secret encrypted environment
+ variable. Known values are: "GENERAL" and
+ "SECRET".
+ "value":
+ "str" # Optional. The value. If the type is
+                                  ``SECRET``, the value will be encrypted on
+ first submission. On following submissions, the
+ encrypted value should be used.
+ }
+ ],
+ "git": {
+ "branch": "str", #
+ Optional. The name of the branch to use.
+ "repo_clone_url":
+ "str" # Optional. The clone URL of the repo.
+ Example:
+ ``https://github.com/digitalocean/sample-golang.git``.
+ },
+ "github": {
+ "branch": "str", #
+ Optional. The name of the branch to use.
+ "deploy_on_push":
+ bool, # Optional. Whether to automatically deploy
+ new commits made to the repo.
+ "repo": "str" #
+ Optional. The name of the repo in the format
+ owner/repo. Example: ``digitalocean/sample-golang``.
+ },
+ "gitlab": {
+ "branch": "str", #
+ Optional. The name of the branch to use.
+ "deploy_on_push":
+ bool, # Optional. Whether to automatically deploy
+ new commits made to the repo.
+ "repo": "str" #
+ Optional. The name of the repo in the format
+ owner/repo. Example: ``digitalocean/sample-golang``.
+ },
+ "image": {
+ "deploy_on_push": {
+ "enabled":
+ bool # Optional. Whether to automatically deploy
+ new images. Can only be used for images hosted in
+ DOCR and can only be used with an image tag, not
+ a specific digest.
+ },
+ "digest": "str", #
+ Optional. The image digest. Cannot be specified if
+ tag is provided.
+ "registry": "str", #
+ Optional. The registry name. Must be left empty for
+ the ``DOCR`` registry type.
+ "registry_credentials": "str", # Optional. The
+ credentials to be able to pull the image. The value
+ will be encrypted on first submission. On following
+ submissions, the encrypted value should be used. *
+ "$username:$access_token" for registries of type
+ ``DOCKER_HUB``. * "$username:$access_token" for
+ registries of type ``GHCR``.
+ "registry_type":
+ "str", # Optional. * DOCKER_HUB: The DockerHub
+ container registry type. * DOCR: The DigitalOcean
+ container registry type. * GHCR: The Github container
+ registry type. Known values are: "DOCKER_HUB",
+ "DOCR", and "GHCR".
+ "repository": "str",
+ # Optional. The repository name.
+ "tag": "latest" #
+ Optional. Default value is "latest". The repository
+ tag. Defaults to ``latest`` if not provided and no
+ digest is provided. Cannot be specified if digest is
+ provided.
+ },
+ "instance_count": 1, #
+ Optional. Default value is 1. The amount of instances
+ that this component should be scaled to. Default: 1. Must
+ not be set if autoscaling is used.
+ "instance_size_slug": {},
+ "kind": "UNSPECIFIED", #
+ Optional. Default value is "UNSPECIFIED". * UNSPECIFIED:
+ Default job type, will auto-complete to POST_DEPLOY kind.
+ * PRE_DEPLOY: Indicates a job that runs before an app
+ deployment. * POST_DEPLOY: Indicates a job that runs
+ after an app deployment. * FAILED_DEPLOY: Indicates a job
+ that runs after a component fails to deploy. Known values
+ are: "UNSPECIFIED", "PRE_DEPLOY", "POST_DEPLOY", and
+ "FAILED_DEPLOY".
+ "log_destinations": [
+ {
+ "name":
+ "str", # Required.
+ "datadog": {
+ "api_key": "str", # Datadog API key.
+ Required.
+ "endpoint": "str" # Optional. Datadog HTTP
+ log intake endpoint.
+ },
+ "logtail": {
+ "token": "str" # Optional. Logtail token.
+ },
+ "open_search": {
+ "basic_auth": {
+ "password": "str", # Optional. Password
+ for user defined in User. Is required
+ when ``endpoint`` is set. Cannot be set
+ if using a DigitalOcean DBaaS OpenSearch
+ cluster.
+ "user": "str" # Optional. Username to
+ authenticate with. Only required when
+ ``endpoint`` is set. Defaults to
+ ``doadmin`` when ``cluster_name`` is set.
+ },
+ "cluster_name": "str", # Optional. The name
+ of a DigitalOcean DBaaS OpenSearch cluster to
+ use as a log forwarding destination. Cannot
+ be specified if ``endpoint`` is also
+ specified.
+ "endpoint": "str", # Optional. OpenSearch
+ API Endpoint. Only HTTPS is supported.
+ Format:
+                              https://<host>:<port>. Cannot
+ be specified if ``cluster_name`` is also
+ specified.
+ "index_name": "logs" # Optional. Default
+ value is "logs". The index name to use for
+ the logs. If not set, the default index name
+ is "logs".
+ },
+ "papertrail":
+ {
+ "endpoint": "str" # Papertrail syslog
+ endpoint. Required.
+ }
+ }
+ ],
+ "name": "str", # Optional.
+ The name. Must be unique across all components within the
+ same app.
+ "run_command": "str", #
+ Optional. An optional run command to override the
+ component's default.
+ "source_dir": "str", #
+ Optional. An optional path to the working directory to
+ use for the build. For Dockerfile builds, this will be
+ used as the build context. Must be relative to the root
+ of the repo.
+ "termination": {
+ "grace_period_seconds": 0 # Optional. The number of
+ seconds to wait between sending a TERM signal to a
+ container and issuing a KILL which causes immediate
+ shutdown. (Default 120).
+ }
+ }
+ ],
+ "maintenance": {
+ "archive": bool, # Optional.
+ Indicates whether the app should be archived. Setting this to
+ true implies that enabled is set to true.
+ "enabled": bool, # Optional.
+ Indicates whether maintenance mode should be enabled for the
+ app.
+ "offline_page_url": "str" #
+ Optional. A custom offline page to display when maintenance
+ mode is enabled or the app is archived.
+ },
+ "region": "str", # Optional. The slug form
+ of the geographical origin of the app. Default: ``nearest
+ available``. Known values are: "atl", "nyc", "sfo", "tor", "ams",
+ "fra", "lon", "blr", "sgp", and "syd".
+ "services": [
+ {
+ "autoscaling": {
+ "max_instance_count":
+ 0, # Optional. The maximum amount of instances for
+ this component. Must be more than min_instance_count.
+ "metrics": {
+ "cpu": {
+ "percent": 80 # Optional. Default value is
+ 80. The average target CPU utilization for
+ the component.
+ }
+ },
+ "min_instance_count":
+ 0 # Optional. The minimum amount of instances for
+ this component. Must be less than max_instance_count.
+ },
+ "bitbucket": {
+ "branch": "str", #
+ Optional. The name of the branch to use.
+ "deploy_on_push":
+ bool, # Optional. Whether to automatically deploy
+ new commits made to the repo.
+ "repo": "str" #
+ Optional. The name of the repo in the format
+ owner/repo. Example: ``digitalocean/sample-golang``.
+ },
+ "build_command": "str", #
+ Optional. An optional build command to run while building
+ this component from source.
+ "cors": {
+ "allow_credentials":
+ bool, # Optional. Whether browsers should expose the
+ response to the client-side JavaScript code when the
+                            request's credentials mode is include. This
+ configures the ``Access-Control-Allow-Credentials``
+ header.
+ "allow_headers": [
+ "str" #
+ Optional. The set of allowed HTTP request
+ headers. This configures the
+ ``Access-Control-Allow-Headers`` header.
+ ],
+ "allow_methods": [
+ "str" #
+ Optional. The set of allowed HTTP methods. This
+ configures the ``Access-Control-Allow-Methods``
+ header.
+ ],
+ "allow_origins": [
+ {
+ "exact": "str", # Optional. Exact string
+                                  match. Only 1 of ``exact``, ``prefix``,
+                                  or ``regex`` must be set.
+                                "prefix": "str", # Optional. Prefix-based
+                                  match. Only 1 of ``exact``, ``prefix``,
+                                  or ``regex`` must be set.
+                                "regex": "str" # Optional. RE2 style
+                                  regex-based match. Only 1 of ``exact``,
+                                  ``prefix``, or ``regex`` must be set. For
+ more information about RE2 syntax, see:
+ https://github.com/google/re2/wiki/Syntax.
+ }
+ ],
+ "expose_headers": [
+ "str" #
+ Optional. The set of HTTP response headers that
+ browsers are allowed to access. This configures
+ the ``Access-Control-Expose-Headers`` header.
+ ],
+ "max_age": "str" #
+ Optional. An optional duration specifying how long
+ browsers can cache the results of a preflight
+ request. This configures the
+ ``Access-Control-Max-Age`` header.
+ },
+ "dockerfile_path": "str", #
+ Optional. The path to the Dockerfile relative to the root
+ of the repo. If set, it will be used to build this
+ component. Otherwise, App Platform will attempt to build
+ it using buildpacks.
+ "environment_slug": "str", #
+ Optional. An environment slug describing the type of this
+ app. For a full list, please refer to `the product
+ documentation
+ `_.
+ "envs": [
+ {
+ "key": "str",
+ # The variable name. Required.
+ "scope":
+ "RUN_AND_BUILD_TIME", # Optional. Default value
+ is "RUN_AND_BUILD_TIME". * RUN_TIME: Made
+ available only at run-time * BUILD_TIME: Made
+ available only at build-time *
+ RUN_AND_BUILD_TIME: Made available at both build
+ and run-time. Known values are: "UNSET",
+ "RUN_TIME", "BUILD_TIME", and
+ "RUN_AND_BUILD_TIME".
+ "type":
+ "GENERAL", # Optional. Default value is
+ "GENERAL". * GENERAL: A plain-text environment
+ variable * SECRET: A secret encrypted environment
+ variable. Known values are: "GENERAL" and
+ "SECRET".
+ "value":
+ "str" # Optional. The value. If the type is
+                            ``SECRET``, the value will be encrypted on
+ first submission. On following submissions, the
+ encrypted value should be used.
+ }
+ ],
+ "git": {
+ "branch": "str", #
+ Optional. The name of the branch to use.
+ "repo_clone_url":
+ "str" # Optional. The clone URL of the repo.
+ Example:
+ ``https://github.com/digitalocean/sample-golang.git``.
+ },
+ "github": {
+ "branch": "str", #
+ Optional. The name of the branch to use.
+ "deploy_on_push":
+ bool, # Optional. Whether to automatically deploy
+ new commits made to the repo.
+ "repo": "str" #
+ Optional. The name of the repo in the format
+ owner/repo. Example: ``digitalocean/sample-golang``.
+ },
+ "gitlab": {
+ "branch": "str", #
+ Optional. The name of the branch to use.
+ "deploy_on_push":
+ bool, # Optional. Whether to automatically deploy
+ new commits made to the repo.
+ "repo": "str" #
+ Optional. The name of the repo in the format
+ owner/repo. Example: ``digitalocean/sample-golang``.
+ },
+ "health_check": {
+ "failure_threshold":
+ 0, # Optional. The number of failed health checks
+ before considered unhealthy.
+ "http_path": "str",
+ # Optional. The route path used for the HTTP health
+ check ping. If not set, the HTTP health check will be
+ disabled and a TCP health check used instead.
+ "initial_delay_seconds": 0, # Optional. The number
+ of seconds to wait before beginning health checks.
+ "period_seconds": 0,
+ # Optional. The number of seconds to wait between
+ health checks.
+ "port": 0, #
+ Optional. The port on which the health check will be
+ performed. If not set, the health check will be
+ performed on the component's http_port.
+ "success_threshold":
+ 0, # Optional. The number of successful health
+ checks before considered healthy.
+ "timeout_seconds": 0
+ # Optional. The number of seconds after which the
+ check times out.
+ },
+ "http_port": 0, # Optional.
+ The internal port on which this service's run command
+ will listen. Default: 8080 If there is not an environment
+                          variable with the name ``PORT``, one will be
+ automatically added with its value set to the value of
+ this field.
+ "image": {
+ "deploy_on_push": {
+ "enabled":
+ bool # Optional. Whether to automatically deploy
+ new images. Can only be used for images hosted in
+ DOCR and can only be used with an image tag, not
+ a specific digest.
+ },
+ "digest": "str", #
+ Optional. The image digest. Cannot be specified if
+ tag is provided.
+ "registry": "str", #
+ Optional. The registry name. Must be left empty for
+ the ``DOCR`` registry type.
+ "registry_credentials": "str", # Optional. The
+ credentials to be able to pull the image. The value
+ will be encrypted on first submission. On following
+ submissions, the encrypted value should be used. *
+ "$username:$access_token" for registries of type
+ ``DOCKER_HUB``. * "$username:$access_token" for
+ registries of type ``GHCR``.
+ "registry_type":
+ "str", # Optional. * DOCKER_HUB: The DockerHub
+ container registry type. * DOCR: The DigitalOcean
+ container registry type. * GHCR: The Github container
+ registry type. Known values are: "DOCKER_HUB",
+ "DOCR", and "GHCR".
+ "repository": "str",
+ # Optional. The repository name.
+ "tag": "latest" #
+ Optional. Default value is "latest". The repository
+ tag. Defaults to ``latest`` if not provided and no
+ digest is provided. Cannot be specified if digest is
+ provided.
+ },
+ "instance_count": 1, #
+ Optional. Default value is 1. The amount of instances
+ that this component should be scaled to. Default: 1. Must
+ not be set if autoscaling is used.
+ "instance_size_slug": {},
+ "internal_ports": [
+ 0 # Optional. The
+ ports on which this service will listen for internal
+ traffic.
+ ],
+ "liveness_health_check": {
+ "failure_threshold":
+ 0, # Optional. The number of failed health checks
+ before considered unhealthy.
+ "http_path": "str",
+ # Optional. The route path used for the HTTP health
+ check ping. If not set, the HTTP health check will be
+ disabled and a TCP health check used instead.
+ "initial_delay_seconds": 0, # Optional. The number
+ of seconds to wait before beginning health checks.
+ "period_seconds": 0,
+ # Optional. The number of seconds to wait between
+ health checks.
+ "port": 0, #
+ Optional. The port on which the health check will be
+ performed.
+ "success_threshold":
+ 0, # Optional. The number of successful health
+ checks before considered healthy.
+ "timeout_seconds": 0
+ # Optional. The number of seconds after which the
+ check times out.
+ },
+ "log_destinations": [
+ {
+ "name":
+ "str", # Required.
+ "datadog": {
+ "api_key": "str", # Datadog API key.
+ Required.
+ "endpoint": "str" # Optional. Datadog HTTP
+ log intake endpoint.
+ },
+ "logtail": {
+ "token": "str" # Optional. Logtail token.
+ },
+ "open_search": {
+ "basic_auth": {
+ "password": "str", # Optional. Password
+ for user defined in User. Is required
+ when ``endpoint`` is set. Cannot be set
+ if using a DigitalOcean DBaaS OpenSearch
+ cluster.
+ "user": "str" # Optional. Username to
+ authenticate with. Only required when
+ ``endpoint`` is set. Defaults to
+ ``doadmin`` when ``cluster_name`` is set.
+ },
+ "cluster_name": "str", # Optional. The name
+ of a DigitalOcean DBaaS OpenSearch cluster to
+ use as a log forwarding destination. Cannot
+ be specified if ``endpoint`` is also
+ specified.
+ "endpoint": "str", # Optional. OpenSearch
+ API Endpoint. Only HTTPS is supported.
+ Format:
+                              https://<host>:<port>. Cannot
+ be specified if ``cluster_name`` is also
+ specified.
+ "index_name": "logs" # Optional. Default
+ value is "logs". The index name to use for
+ the logs. If not set, the default index name
+ is "logs".
+ },
+ "papertrail":
+ {
+ "endpoint": "str" # Papertrail syslog
+ endpoint. Required.
+ }
+ }
+ ],
+ "name": "str", # Optional.
+ The name. Must be unique across all components within the
+ same app.
+ "protocol": "str", #
+ Optional. The protocol which the service uses to serve
+                          traffic on the http_port. * ``HTTP``: The app is
+                          serving the HTTP protocol. Default. * ``HTTP2``: The
+ app is serving the HTTP/2 protocol. Currently, this needs
+ to be implemented in the service by serving HTTP/2
+ cleartext (h2c). Known values are: "HTTP" and "HTTP2".
+ "routes": [
+ {
+ "path":
+ "str", # Optional. (Deprecated - Use Ingress
+ Rules instead). An HTTP path prefix. Paths must
+ start with / and must be unique across all
+ components within an app.
+ "preserve_path_prefix": bool # Optional. An
+ optional flag to preserve the path that is
+ forwarded to the backend service. By default, the
+ HTTP request path will be trimmed from the left
+ when forwarded to the component. For example, a
+ component with ``path=/api`` will have requests
+ to ``/api/list`` trimmed to ``/list``. If this
+                              value is ``true``, the path will remain
+ ``/api/list``.
+ }
+ ],
+ "run_command": "str", #
+ Optional. An optional run command to override the
+ component's default.
+ "source_dir": "str", #
+ Optional. An optional path to the working directory to
+ use for the build. For Dockerfile builds, this will be
+ used as the build context. Must be relative to the root
+ of the repo.
+ "termination": {
+ "drain_seconds": 0,
+ # Optional. The number of seconds to wait between
+ selecting a container instance for termination and
+ issuing the TERM signal. Selecting a container
+ instance for termination begins an asynchronous drain
+ of new requests on upstream load-balancers. (Default
+ 15).
+ "grace_period_seconds": 0 # Optional. The number of
+ seconds to wait between sending a TERM signal to a
+ container and issuing a KILL which causes immediate
+ shutdown. (Default 120).
+ }
+ }
+ ],
+ "static_sites": [
+ {
+ "bitbucket": {
+ "branch": "str", #
+ Optional. The name of the branch to use.
+ "deploy_on_push":
+ bool, # Optional. Whether to automatically deploy
+ new commits made to the repo.
+ "repo": "str" #
+ Optional. The name of the repo in the format
+ owner/repo. Example: ``digitalocean/sample-golang``.
+ },
+ "build_command": "str", #
+ Optional. An optional build command to run while building
+ this component from source.
+ "catchall_document": "str",
+ # Optional. The name of the document to use as the
+ fallback for any requests to documents that are not found
+ when serving this static site. Only 1 of
+ ``catchall_document`` or ``error_document`` can be set.
+ "cors": {
+ "allow_credentials":
+ bool, # Optional. Whether browsers should expose the
+ response to the client-side JavaScript code when the
+                          request's credentials mode is include. This
+ configures the ``Access-Control-Allow-Credentials``
+ header.
+ "allow_headers": [
+ "str" #
+ Optional. The set of allowed HTTP request
+ headers. This configures the
+ ``Access-Control-Allow-Headers`` header.
+ ],
+ "allow_methods": [
+ "str" #
+ Optional. The set of allowed HTTP methods. This
+ configures the ``Access-Control-Allow-Methods``
+ header.
+ ],
+ "allow_origins": [
+ {
+ "exact": "str", # Optional. Exact string
+                                  match. Only 1 of ``exact``, ``prefix``,
+                                  or ``regex`` must be set.
+                                "prefix": "str", # Optional. Prefix-based
+                                  match. Only 1 of ``exact``, ``prefix``,
+                                  or ``regex`` must be set.
+                                "regex": "str" # Optional. RE2 style
+                                  regex-based match. Only 1 of ``exact``,
+                                  ``prefix``, or ``regex`` must be set. For
+ more information about RE2 syntax, see:
+ https://github.com/google/re2/wiki/Syntax.
+ }
+ ],
+ "expose_headers": [
+ "str" #
+ Optional. The set of HTTP response headers that
+ browsers are allowed to access. This configures
+ the ``Access-Control-Expose-Headers`` header.
+ ],
+ "max_age": "str" #
+ Optional. An optional duration specifying how long
+ browsers can cache the results of a preflight
+ request. This configures the
+ ``Access-Control-Max-Age`` header.
+ },
+ "dockerfile_path": "str", #
+ Optional. The path to the Dockerfile relative to the root
+ of the repo. If set, it will be used to build this
+ component. Otherwise, App Platform will attempt to build
+ it using buildpacks.
+ "environment_slug": "str", #
+ Optional. An environment slug describing the type of this
+ app. For a full list, please refer to `the product
+ documentation
+ `_.
+ "envs": [
+ {
+ "key": "str",
+ # The variable name. Required.
+ "scope":
+ "RUN_AND_BUILD_TIME", # Optional. Default value
+ is "RUN_AND_BUILD_TIME". * RUN_TIME: Made
+ available only at run-time * BUILD_TIME: Made
+ available only at build-time *
+ RUN_AND_BUILD_TIME: Made available at both build
+ and run-time. Known values are: "UNSET",
+ "RUN_TIME", "BUILD_TIME", and
+ "RUN_AND_BUILD_TIME".
+ "type":
+ "GENERAL", # Optional. Default value is
+ "GENERAL". * GENERAL: A plain-text environment
+ variable * SECRET: A secret encrypted environment
+ variable. Known values are: "GENERAL" and
+ "SECRET".
+ "value":
+ "str" # Optional. The value. If the type is
+                            ``SECRET``, the value will be encrypted on
+ first submission. On following submissions, the
+ encrypted value should be used.
+ }
+ ],
+ "error_document": "404.html",
+ # Optional. Default value is "404.html". The name of the
+ error document to use when serving this static site.
+ Default: 404.html. If no such file exists within the
+ built assets, App Platform will supply one.
+ "git": {
+ "branch": "str", #
+ Optional. The name of the branch to use.
+ "repo_clone_url":
+ "str" # Optional. The clone URL of the repo.
+ Example:
+ ``https://github.com/digitalocean/sample-golang.git``.
+ },
+ "github": {
+ "branch": "str", #
+ Optional. The name of the branch to use.
+ "deploy_on_push":
+ bool, # Optional. Whether to automatically deploy
+ new commits made to the repo.
+ "repo": "str" #
+ Optional. The name of the repo in the format
+ owner/repo. Example: ``digitalocean/sample-golang``.
+ },
+ "gitlab": {
+ "branch": "str", #
+ Optional. The name of the branch to use.
+ "deploy_on_push":
+ bool, # Optional. Whether to automatically deploy
+ new commits made to the repo.
+ "repo": "str" #
+ Optional. The name of the repo in the format
+ owner/repo. Example: ``digitalocean/sample-golang``.
+ },
+ "image": {
+ "deploy_on_push": {
+ "enabled":
+ bool # Optional. Whether to automatically deploy
+ new images. Can only be used for images hosted in
+ DOCR and can only be used with an image tag, not
+ a specific digest.
+ },
+ "digest": "str", #
+ Optional. The image digest. Cannot be specified if
+ tag is provided.
+ "registry": "str", #
+ Optional. The registry name. Must be left empty for
+ the ``DOCR`` registry type.
+ "registry_credentials": "str", # Optional. The
+ credentials to be able to pull the image. The value
+ will be encrypted on first submission. On following
+ submissions, the encrypted value should be used. *
+ "$username:$access_token" for registries of type
+ ``DOCKER_HUB``. * "$username:$access_token" for
+ registries of type ``GHCR``.
+ "registry_type":
+ "str", # Optional. * DOCKER_HUB: The DockerHub
+ container registry type. * DOCR: The DigitalOcean
+ container registry type. * GHCR: The Github container
+ registry type. Known values are: "DOCKER_HUB",
+ "DOCR", and "GHCR".
+ "repository": "str",
+ # Optional. The repository name.
+ "tag": "latest" #
+ Optional. Default value is "latest". The repository
+ tag. Defaults to ``latest`` if not provided and no
+ digest is provided. Cannot be specified if digest is
+ provided.
+ },
+ "index_document":
+ "index.html", # Optional. Default value is "index.html".
+ The name of the index document to use when serving this
+ static site. Default: index.html.
+ "log_destinations": [
+ {
+ "name":
+ "str", # Required.
+ "datadog": {
+ "api_key": "str", # Datadog API key.
+ Required.
+ "endpoint": "str" # Optional. Datadog HTTP
+ log intake endpoint.
+ },
+ "logtail": {
+ "token": "str" # Optional. Logtail token.
+ },
+ "open_search": {
+ "basic_auth": {
+ "password": "str", # Optional. Password
+ for user defined in User. Is required
+ when ``endpoint`` is set. Cannot be set
+ if using a DigitalOcean DBaaS OpenSearch
+ cluster.
+ "user": "str" # Optional. Username to
+ authenticate with. Only required when
+ ``endpoint`` is set. Defaults to
+ ``doadmin`` when ``cluster_name`` is set.
+ },
+ "cluster_name": "str", # Optional. The name
+ of a DigitalOcean DBaaS OpenSearch cluster to
+ use as a log forwarding destination. Cannot
+ be specified if ``endpoint`` is also
+ specified.
+ "endpoint": "str", # Optional. OpenSearch
+ API Endpoint. Only HTTPS is supported.
+ Format:
+                              https://<host>:<port>. Cannot
+ be specified if ``cluster_name`` is also
+ specified.
+ "index_name": "logs" # Optional. Default
+ value is "logs". The index name to use for
+ the logs. If not set, the default index name
+ is "logs".
+ },
+ "papertrail":
+ {
+ "endpoint": "str" # Papertrail syslog
+ endpoint. Required.
+ }
+ }
+ ],
+ "name": "str", # Optional.
+ The name. Must be unique across all components within the
+ same app.
+ "output_dir": "str", #
+ Optional. An optional path to where the built assets will
+ be located, relative to the build context. If not set,
+ App Platform will automatically scan for these directory
+                          names: ``_static``, ``dist``, ``public``,
+ ``build``.
+ "routes": [
+ {
+ "path":
+ "str", # Optional. (Deprecated - Use Ingress
+ Rules instead). An HTTP path prefix. Paths must
+ start with / and must be unique across all
+ components within an app.
+ "preserve_path_prefix": bool # Optional. An
+ optional flag to preserve the path that is
+ forwarded to the backend service. By default, the
+ HTTP request path will be trimmed from the left
+ when forwarded to the component. For example, a
+ component with ``path=/api`` will have requests
+ to ``/api/list`` trimmed to ``/list``. If this
+                              value is ``true``, the path will remain
+ ``/api/list``.
+ }
+ ],
+ "run_command": "str", #
+ Optional. An optional run command to override the
+ component's default.
+ "source_dir": "str" #
+ Optional. An optional path to the working directory to
+ use for the build. For Dockerfile builds, this will be
+ used as the build context. Must be relative to the root
+ of the repo.
+ }
+ ],
+ "vpc": {
+ "egress_ips": [
+ {
+ "ip": "str" #
+ Optional. The egress ips associated with the VPC.
+ }
+ ],
+ "id": "str" # Optional. The ID of
+ the VPC.
+ },
+ "workers": [
+ {
+ "autoscaling": {
+ "max_instance_count":
+ 0, # Optional. The maximum amount of instances for
+ this component. Must be more than min_instance_count.
+ "metrics": {
+ "cpu": {
+ "percent": 80 # Optional. Default value is
+ 80. The average target CPU utilization for
+ the component.
+ }
+ },
+ "min_instance_count":
+ 0 # Optional. The minimum amount of instances for
+ this component. Must be less than max_instance_count.
+ },
+ "bitbucket": {
+ "branch": "str", #
+ Optional. The name of the branch to use.
+ "deploy_on_push":
+ bool, # Optional. Whether to automatically deploy
+ new commits made to the repo.
+ "repo": "str" #
+ Optional. The name of the repo in the format
+ owner/repo. Example: ``digitalocean/sample-golang``.
+ },
+ "build_command": "str", #
+ Optional. An optional build command to run while building
+ this component from source.
+ "dockerfile_path": "str", #
+ Optional. The path to the Dockerfile relative to the root
+ of the repo. If set, it will be used to build this
+ component. Otherwise, App Platform will attempt to build
+ it using buildpacks.
+ "environment_slug": "str", #
+ Optional. An environment slug describing the type of this
+ app. For a full list, please refer to `the product
+ documentation
+ `_.
+ "envs": [
+ {
+ "key": "str",
+ # The variable name. Required.
+ "scope":
+ "RUN_AND_BUILD_TIME", # Optional. Default value
+ is "RUN_AND_BUILD_TIME". * RUN_TIME: Made
+ available only at run-time * BUILD_TIME: Made
+ available only at build-time *
+ RUN_AND_BUILD_TIME: Made available at both build
+ and run-time. Known values are: "UNSET",
+ "RUN_TIME", "BUILD_TIME", and
+ "RUN_AND_BUILD_TIME".
+ "type":
+ "GENERAL", # Optional. Default value is
+ "GENERAL". * GENERAL: A plain-text environment
+ variable * SECRET: A secret encrypted environment
+ variable. Known values are: "GENERAL" and
+ "SECRET".
+ "value":
+ "str" # Optional. The value. If the type is
+                            ``SECRET``, the value will be encrypted on
+ first submission. On following submissions, the
+ encrypted value should be used.
+ }
+ ],
+ "git": {
+ "branch": "str", #
+ Optional. The name of the branch to use.
+ "repo_clone_url":
+ "str" # Optional. The clone URL of the repo.
+ Example:
+ ``https://github.com/digitalocean/sample-golang.git``.
+ },
+ "github": {
+ "branch": "str", #
+ Optional. The name of the branch to use.
+ "deploy_on_push":
+ bool, # Optional. Whether to automatically deploy
+ new commits made to the repo.
+ "repo": "str" #
+ Optional. The name of the repo in the format
+ owner/repo. Example: ``digitalocean/sample-golang``.
+ },
+ "gitlab": {
+ "branch": "str", #
+ Optional. The name of the branch to use.
+ "deploy_on_push":
+ bool, # Optional. Whether to automatically deploy
+ new commits made to the repo.
+ "repo": "str" #
+ Optional. The name of the repo in the format
+ owner/repo. Example: ``digitalocean/sample-golang``.
+ },
+ "image": {
+ "deploy_on_push": {
+ "enabled":
+ bool # Optional. Whether to automatically deploy
+ new images. Can only be used for images hosted in
+ DOCR and can only be used with an image tag, not
+ a specific digest.
+ },
+ "digest": "str", #
+ Optional. The image digest. Cannot be specified if
+ tag is provided.
+ "registry": "str", #
+ Optional. The registry name. Must be left empty for
+ the ``DOCR`` registry type.
+ "registry_credentials": "str", # Optional. The
+ credentials to be able to pull the image. The value
+ will be encrypted on first submission. On following
+ submissions, the encrypted value should be used. *
+ "$username:$access_token" for registries of type
+ ``DOCKER_HUB``. * "$username:$access_token" for
+ registries of type ``GHCR``.
+ "registry_type":
+ "str", # Optional. * DOCKER_HUB: The DockerHub
+ container registry type. * DOCR: The DigitalOcean
+ container registry type. * GHCR: The Github container
+ registry type. Known values are: "DOCKER_HUB",
+ "DOCR", and "GHCR".
+ "repository": "str",
+ # Optional. The repository name.
+ "tag": "latest" #
+ Optional. Default value is "latest". The repository
+ tag. Defaults to ``latest`` if not provided and no
+ digest is provided. Cannot be specified if digest is
+ provided.
+ },
+ "instance_count": 1, #
+ Optional. Default value is 1. The amount of instances
+ that this component should be scaled to. Default: 1. Must
+ not be set if autoscaling is used.
+ "instance_size_slug": {},
+ "liveness_health_check": {
+ "failure_threshold":
+ 0, # Optional. The number of failed health checks
+ before considered unhealthy.
+ "http_path": "str",
+ # Optional. The route path used for the HTTP health
+ check ping. If not set, the HTTP health check will be
+ disabled and a TCP health check used instead.
+ "initial_delay_seconds": 0, # Optional. The number
+ of seconds to wait before beginning health checks.
+ "period_seconds": 0,
+ # Optional. The number of seconds to wait between
+ health checks.
+ "port": 0, #
+ Optional. The port on which the health check will be
+ performed.
+ "success_threshold":
+ 0, # Optional. The number of successful health
+ checks before considered healthy.
+ "timeout_seconds": 0
+ # Optional. The number of seconds after which the
+ check times out.
+ },
+ "log_destinations": [
+ {
+ "name":
+ "str", # Required.
+ "datadog": {
+ "api_key": "str", # Datadog API key.
+ Required.
+ "endpoint": "str" # Optional. Datadog HTTP
+ log intake endpoint.
+ },
+ "logtail": {
+ "token": "str" # Optional. Logtail token.
+ },
+ "open_search": {
+ "basic_auth": {
+ "password": "str", # Optional. Password
+ for user defined in User. Is required
+ when ``endpoint`` is set. Cannot be set
+ if using a DigitalOcean DBaaS OpenSearch
+ cluster.
+ "user": "str" # Optional. Username to
+ authenticate with. Only required when
+ ``endpoint`` is set. Defaults to
+ ``doadmin`` when ``cluster_name`` is set.
+ },
+ "cluster_name": "str", # Optional. The name
+ of a DigitalOcean DBaaS OpenSearch cluster to
+ use as a log forwarding destination. Cannot
+ be specified if ``endpoint`` is also
+ specified.
+ "endpoint": "str", # Optional. OpenSearch
+ API Endpoint. Only HTTPS is supported.
+ Format:
+ https://:code:``::code:``. Cannot
+ be specified if ``cluster_name`` is also
+ specified.
+ "index_name": "logs" # Optional. Default
+ value is "logs". The index name to use for
+ the logs. If not set, the default index name
+ is "logs".
+ },
+ "papertrail":
+ {
+ "endpoint": "str" # Papertrail syslog
+ endpoint. Required.
+ }
+ }
+ ],
+ "name": "str", # Optional.
+ The name. Must be unique across all components within the
+ same app.
+ "run_command": "str", #
+ Optional. An optional run command to override the
+ component's default.
+ "source_dir": "str", #
+ Optional. An optional path to the working directory to
+ use for the build. For Dockerfile builds, this will be
+ used as the build context. Must be relative to the root
+ of the repo.
+ "termination": {
+ "grace_period_seconds": 0 # Optional. The number of
+ seconds to wait between sending a TERM signal to a
+ container and issuing a KILL which causes immediate
+ shutdown. (Default 120).
+ }
+ }
+ ]
+ },
+ "static_sites": [
+ {
+ "name": "str", # Optional. The name
+ of this static site.
+ "source_commit_hash": "str" #
+ Optional. The commit hash of the repository that was used to
+ build this static site.
+ }
+ ],
+ "tier_slug": "str", # Optional. The current pricing
+ tier slug of the deployment.
+ "updated_at": "2020-02-20 00:00:00", # Optional.
+ When the deployment was last updated.
+ "workers": [
+ {
+ "name": "str", # Optional. The name
+ of this worker.
+ "source_commit_hash": "str" #
+ Optional. The commit hash of the repository that was used to
+ build this worker.
+ }
+ ]
+ },
+ "deployment_id": "str", # Optional. For deployment events,
+ this is the same as the deployment's ID. For autoscaling events, this is
+ the deployment that was autoscaled.
+ "id": "str", # Optional. The ID of the event (UUID).
+ "type": "str" # Optional. The type of event. Known values
+ are: "UNKNOWN", "DEPLOYMENT", and "AUTOSCALING".
+ }
+ ],
+ "links": {
+ "pages": {}
+ }
+ }
+ # response body for status code(s): 404
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
+ """
+ error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ 401: cast(
+ Type[HttpResponseError],
+ lambda response: ClientAuthenticationError(response=response),
+ ),
+ 429: HttpResponseError,
+ 500: HttpResponseError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = kwargs.pop("params", {}) or {}
+
+ cls: ClsType[JSON] = kwargs.pop("cls", None)
+
+ _request = build_apps_list_events_request(
+ app_id=app_id,
+ page=page,
+ per_page=per_page,
+ event_types=event_types,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = (
+ await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 404]:
+ if _stream:
+ await response.read() # Load the body in memory and close the socket
+ map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
+ raise HttpResponseError(response=response)
+
+ response_headers = {}
+ if response.status_code == 200:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if response.status_code == 404:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if cls:
+ return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
+
+ return cast(JSON, deserialized) # type: ignore
+
+ @distributed_trace_async
+ async def get_event(self, app_id: str, event_id: str, **kwargs: Any) -> JSON:
+ # pylint: disable=line-too-long
+ """Get an Event.
+
+ Get a single event for an app.
+
+ :param app_id: The app ID. Required.
+ :type app_id: str
+ :param event_id: The event ID. Required.
+ :type event_id: str
+ :return: JSON object
+ :rtype: JSON
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # response body for status code(s): 200
+ response == {
+ "event": {
+ "autoscaling": {
+ "components": {
+ "str": {
+ "from": 0, # Optional. The number of
+ replicas before scaling.
+ "to": 0, # Optional. The number of replicas
+ after scaling.
+ "triggering_metric": "str" # Optional. The
+ metric that triggered the scale change. Known values are "cpu",
+ "requests_per_second", "request_duration". For inactivity sleep,
+ "scale_from_zero" and "scale_to_zero" are used.
+ }
+ },
+ "phase": "str" # Optional. The current phase of the
+ autoscaling event. Known values are: "UNKNOWN", "PENDING", "IN_PROGRESS",
+ "SUCCEEDED", "FAILED", and "CANCELED".
+ },
+ "created_at": "2020-02-20 00:00:00", # Optional. When the event was
+ created.
+ "deployment": {
+ "cause": "str", # Optional. What caused this deployment to
+ be created.
+ "cloned_from": "str", # Optional. The ID of a previous
+ deployment that this deployment was cloned from.
+ "created_at": "2020-02-20 00:00:00", # Optional. The
+ creation time of the deployment.
+ "functions": [
+ {
+ "name": "str", # Optional. The name of this
+ functions component.
+ "namespace": "str", # Optional. The
+ namespace where the functions are deployed.
+ "source_commit_hash": "str" # Optional. The
+ commit hash of the repository that was used to build this
+ functions component.
+ }
+ ],
+ "id": "str", # Optional. The ID of the deployment.
+ "jobs": [
+ {
+ "name": "str", # Optional. The name of this
+ job.
+ "source_commit_hash": "str" # Optional. The
+ commit hash of the repository that was used to build this job.
+ }
+ ],
+ "phase": "UNKNOWN", # Optional. Default value is "UNKNOWN".
+ Known values are: "UNKNOWN", "PENDING_BUILD", "BUILDING",
+ "PENDING_DEPLOY", "DEPLOYING", "ACTIVE", "SUPERSEDED", "ERROR", and
+ "CANCELED".
+ "phase_last_updated_at": "2020-02-20 00:00:00", # Optional.
+ When the deployment phase was last updated.
+ "progress": {
+ "error_steps": 0, # Optional. Number of unsuccessful
+ steps.
+ "pending_steps": 0, # Optional. Number of pending
+ steps.
+ "running_steps": 0, # Optional. Number of currently
+ running steps.
+ "steps": [
+ {
+ "component_name": "str", # Optional.
+ The component name that this step is associated with.
+ "ended_at": "2020-02-20 00:00:00", #
+ Optional. The end time of this step.
+ "message_base": "str", # Optional.
+ The base of a human-readable description of the step intended
+ to be combined with the component name for presentation. For
+ example: ``message_base`` = "Building service"
+ ``component_name`` = "api".
+ "name": "str", # Optional. The name
+ of this step.
+ "reason": {
+ "code": "str", # Optional.
+ The error code.
+ "message": "str" # Optional.
+ The error message.
+ },
+ "started_at": "2020-02-20 00:00:00",
+ # Optional. The start time of this step.
+ "status": "UNKNOWN", # Optional.
+ Default value is "UNKNOWN". Known values are: "UNKNOWN",
+ "PENDING", "RUNNING", "ERROR", and "SUCCESS".
+ "steps": [
+ {} # Optional. Child steps
+ of this step.
+ ]
+ }
+ ],
+ "success_steps": 0, # Optional. Number of successful
+ steps.
+ "summary_steps": [
+ {
+ "component_name": "str", # Optional.
+ The component name that this step is associated with.
+ "ended_at": "2020-02-20 00:00:00", #
+ Optional. The end time of this step.
+ "message_base": "str", # Optional.
+ The base of a human-readable description of the step intended
+ to be combined with the component name for presentation. For
+ example: ``message_base`` = "Building service"
+ ``component_name`` = "api".
+ "name": "str", # Optional. The name
+ of this step.
+ "reason": {
+ "code": "str", # Optional.
+ The error code.
+ "message": "str" # Optional.
+ The error message.
+ },
+ "started_at": "2020-02-20 00:00:00",
+ # Optional. The start time of this step.
+ "status": "UNKNOWN", # Optional.
+ Default value is "UNKNOWN". Known values are: "UNKNOWN",
+ "PENDING", "RUNNING", "ERROR", and "SUCCESS".
+ "steps": [
+ {} # Optional. Child steps
+ of this step.
+ ]
+ }
+ ],
+ "total_steps": 0 # Optional. Total number of steps.
+ },
+ "services": [
+ {
+ "name": "str", # Optional. The name of this
+ service.
+ "source_commit_hash": "str" # Optional. The
+ commit hash of the repository that was used to build this
+ service.
+ }
+ ],
+ "spec": {
+ "name": "str", # The name of the app. Must be unique
+ across all apps in the same account. Required.
+ "databases": [
+ {
+ "name": "str", # The database's
+ name. The name must be unique across all components within
+ the same app and cannot use capital letters. Required.
+ "cluster_name": "str", # Optional.
+ The name of the underlying DigitalOcean DBaaS cluster. This
+ is required for production databases. For dev databases, if
+ cluster_name is not set, a new cluster will be provisioned.
+ "db_name": "str", # Optional. The
+ name of the MySQL or PostgreSQL database to configure.
+ "db_user": "str", # Optional. The
+ name of the MySQL or PostgreSQL user to configure.
+ "engine": "UNSET", # Optional.
+ Default value is "UNSET". * MYSQL: MySQL * PG: PostgreSQL *
+ REDIS: Caching * MONGODB: MongoDB * KAFKA: Kafka *
+ OPENSEARCH: OpenSearch * VALKEY: ValKey. Known values are:
+ "UNSET", "MYSQL", "PG", "REDIS", "MONGODB", "KAFKA",
+ "OPENSEARCH", and "VALKEY".
+ "production": bool, # Optional.
+ Whether this is a production or dev database.
+ "version": "str" # Optional. The
+ version of the database engine.
+ }
+ ],
+ "disable_edge_cache": False, # Optional. Default
+ value is False. .. role:: raw-html-m2r(raw) :format: html If set
+ to ``true``"" , the app will **not** be cached at the edge (CDN).
+ Enable this option if you want to manage CDN configuration
+                    yourself—whether by using an external CDN provider or by
+ handling static content and caching within your app. This setting is
+ also recommended for apps that require real-time data or serve
+ dynamic content, such as those using Server-Sent Events (SSE) over
+ GET, or hosting an MCP (Model Context Protocol) Server that utilizes
+                    SSE."" :raw-html-m2r:`<br>`
+                    **Note:** This feature is not available
+                    for static site components."" :raw-html-m2r:`<br>`
+                    For more
+ information, see `Disable CDN Cache
+ `_.
+ "disable_email_obfuscation": False, # Optional.
+ Default value is False. If set to ``true``"" , email addresses in the
+ app will not be obfuscated. This is useful for apps that require
+ email addresses to be visible (in the HTML markup).
+ "domains": [
+ {
+ "domain": "str", # The hostname for
+ the domain. Required.
+ "minimum_tls_version": "str", #
+ Optional. The minimum version of TLS a client application can
+ use to access resources for the domain. Must be one of the
+ following values wrapped within quotations: ``"1.2"`` or
+ ``"1.3"``. Known values are: "1.2" and "1.3".
+ "type": "UNSPECIFIED", # Optional.
+ Default value is "UNSPECIFIED". * DEFAULT: The default
+ ``.ondigitalocean.app`` domain assigned to this app *
+ PRIMARY: The primary domain for this app that is displayed as
+ the default in the control panel, used in bindable
+ environment variables, and any other places that reference an
+ app's live URL. Only one domain may be set as primary. *
+ ALIAS: A non-primary domain. Known values are: "UNSPECIFIED",
+ "DEFAULT", "PRIMARY", and "ALIAS".
+ "wildcard": bool, # Optional.
+ Indicates whether the domain includes all sub-domains, in
+ addition to the given domain.
+ "zone": "str" # Optional. Optional.
+ If the domain uses DigitalOcean DNS and you would like App
+ Platform to automatically manage it for you, set this to the
+ name of the domain on your account. For example, If the
+ domain you are adding is ``app.domain.com``"" , the zone
+ could be ``domain.com``.
+ }
+ ],
+ "egress": {
+ "type": "AUTOASSIGN" # Optional. Default
+ value is "AUTOASSIGN". The app egress type. Known values are:
+ "AUTOASSIGN" and "DEDICATED_IP".
+ },
+ "enhanced_threat_control_enabled": False, #
+ Optional. Default value is False. If set to ``true``"" , suspicious
+ requests will go through additional security checks to help mitigate
+ layer 7 DDoS attacks.
+ "functions": [
+ {
+ "name": "str", # The name. Must be
+ unique across all components within the same app. Required.
+ "alerts": [
+ {
+ "disabled": bool, #
+ Optional. Is the alert disabled?.
+ "operator":
+ "UNSPECIFIED_OPERATOR", # Optional. Default value is
+ "UNSPECIFIED_OPERATOR". Known values are:
+ "UNSPECIFIED_OPERATOR", "GREATER_THAN", and
+ "LESS_THAN".
+ "rule":
+ "UNSPECIFIED_RULE", # Optional. Default value is
+ "UNSPECIFIED_RULE". Known values are:
+ "UNSPECIFIED_RULE", "CPU_UTILIZATION",
+ "MEM_UTILIZATION", "RESTART_COUNT",
+ "DEPLOYMENT_FAILED", "DEPLOYMENT_LIVE",
+ "DOMAIN_FAILED", "DOMAIN_LIVE", "AUTOSCALE_FAILED",
+ "AUTOSCALE_SUCCEEDED", "FUNCTIONS_ACTIVATION_COUNT",
+ "FUNCTIONS_AVERAGE_DURATION_MS",
+ "FUNCTIONS_ERROR_RATE_PER_MINUTE",
+ "FUNCTIONS_AVERAGE_WAIT_TIME_MS",
+ "FUNCTIONS_ERROR_COUNT", and
+ "FUNCTIONS_GB_RATE_PER_SECOND".
+ "value": 0.0, #
+ Optional. Threshold value for alert.
+ "window":
+ "UNSPECIFIED_WINDOW" # Optional. Default value is
+ "UNSPECIFIED_WINDOW". Known values are:
+ "UNSPECIFIED_WINDOW", "FIVE_MINUTES", "TEN_MINUTES",
+ "THIRTY_MINUTES", and "ONE_HOUR".
+ }
+ ],
+ "bitbucket": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "deploy_on_push": bool, #
+ Optional. Whether to automatically deploy new commits
+ made to the repo.
+ "repo": "str" # Optional.
+ The name of the repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
+ },
+ "cors": {
+ "allow_credentials": bool, #
+ Optional. Whether browsers should expose the response to
+                                the client-side JavaScript code when the request's
+ credentials mode is include. This configures the
+ ``Access-Control-Allow-Credentials`` header.
+ "allow_headers": [
+ "str" # Optional.
+ The set of allowed HTTP request headers. This
+ configures the ``Access-Control-Allow-Headers``
+ header.
+ ],
+ "allow_methods": [
+ "str" # Optional.
+ The set of allowed HTTP methods. This configures the
+ ``Access-Control-Allow-Methods`` header.
+ ],
+ "allow_origins": [
+ {
+ "exact":
+ "str", # Optional. Exact string match. Only 1 of
+ ``exact``"" , ``prefix``"" , or ``regex`` must be
+ set.
+ "prefix":
+ "str", # Optional. Prefix-based match. Only 1 of
+ ``exact``"" , ``prefix``"" , or ``regex`` must be
+ set.
+ "regex":
+ "str" # Optional. RE2 style regex-based match.
+ Only 1 of ``exact``"" , ``prefix``"" , or
+ ``regex`` must be set. For more information about
+ RE2 syntax, see:
+ https://github.com/google/re2/wiki/Syntax.
+ }
+ ],
+ "expose_headers": [
+ "str" # Optional.
+ The set of HTTP response headers that browsers are
+ allowed to access. This configures the
+ ``Access-Control-Expose-Headers`` header.
+ ],
+ "max_age": "str" # Optional.
+ An optional duration specifying how long browsers can
+ cache the results of a preflight request. This configures
+ the ``Access-Control-Max-Age`` header.
+ },
+ "envs": [
+ {
+ "key": "str", # The
+ variable name. Required.
+ "scope":
+ "RUN_AND_BUILD_TIME", # Optional. Default value is
+ "RUN_AND_BUILD_TIME". * RUN_TIME: Made available only
+ at run-time * BUILD_TIME: Made available only at
+ build-time * RUN_AND_BUILD_TIME: Made available at
+ both build and run-time. Known values are: "UNSET",
+ "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME".
+ "type": "GENERAL", #
+ Optional. Default value is "GENERAL". * GENERAL: A
+ plain-text environment variable * SECRET: A secret
+ encrypted environment variable. Known values are:
+ "GENERAL" and "SECRET".
+ "value": "str" #
+ Optional. The value. If the type is ``SECRET``"" ,
+ the value will be encrypted on first submission. On
+ following submissions, the encrypted value should be
+ used.
+ }
+ ],
+ "git": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "repo_clone_url": "str" #
+ Optional. The clone URL of the repo. Example:
+ ``https://github.com/digitalocean/sample-golang.git``.
+ },
+ "github": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "deploy_on_push": bool, #
+ Optional. Whether to automatically deploy new commits
+ made to the repo.
+ "repo": "str" # Optional.
+ The name of the repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
+ },
+ "gitlab": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "deploy_on_push": bool, #
+ Optional. Whether to automatically deploy new commits
+ made to the repo.
+ "repo": "str" # Optional.
+ The name of the repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
+ },
+ "log_destinations": [
+ {
+ "name": "str", #
+ Required.
+ "datadog": {
+ "api_key":
+ "str", # Datadog API key. Required.
+ "endpoint":
+ "str" # Optional. Datadog HTTP log intake
+ endpoint.
+ },
+ "logtail": {
+ "token":
+ "str" # Optional. Logtail token.
+ },
+ "open_search": {
+ "basic_auth":
+ {
+ "password": "str", # Optional. Password for
+ user defined in User. Is required when
+ ``endpoint`` is set. Cannot be set if using a
+ DigitalOcean DBaaS OpenSearch cluster.
+ "user": "str" # Optional. Username to
+ authenticate with. Only required when
+ ``endpoint`` is set. Defaults to ``doadmin``
+ when ``cluster_name`` is set.
+ },
+ "cluster_name": "str", # Optional. The name of a
+ DigitalOcean DBaaS OpenSearch cluster to use as a
+ log forwarding destination. Cannot be specified
+ if ``endpoint`` is also specified.
+ "endpoint":
+ "str", # Optional. OpenSearch API Endpoint. Only
+ HTTPS is supported. Format:
+ https://:code:``::code:``. Cannot be
+ specified if ``cluster_name`` is also specified.
+ "index_name":
+ "logs" # Optional. Default value is "logs". The
+ index name to use for the logs. If not set, the
+ default index name is "logs".
+ },
+ "papertrail": {
+ "endpoint":
+ "str" # Papertrail syslog endpoint. Required.
+ }
+ }
+ ],
+ "routes": [
+ {
+ "path": "str", #
+ Optional. (Deprecated - Use Ingress Rules instead).
+ An HTTP path prefix. Paths must start with / and must
+ be unique across all components within an app.
+ "preserve_path_prefix": bool # Optional. An optional
+ flag to preserve the path that is forwarded to the
+ backend service. By default, the HTTP request path
+ will be trimmed from the left when forwarded to the
+ component. For example, a component with
+ ``path=/api`` will have requests to ``/api/list``
+ trimmed to ``/list``. If this value is ``true``"" ,
+ the path will remain ``/api/list``.
+ }
+ ],
+ "source_dir": "str" # Optional. An
+ optional path to the working directory to use for the build.
+ For Dockerfile builds, this will be used as the build
+ context. Must be relative to the root of the repo.
+ }
+ ],
+ "ingress": {
+ "rules": [
+ {
+ "component": {
+ "name": "str", # The
+ name of the component to route to. Required.
+ "preserve_path_prefix": "str", # Optional. An
+ optional flag to preserve the path that is forwarded
+ to the backend service. By default, the HTTP request
+ path will be trimmed from the left when forwarded to
+ the component. For example, a component with
+ ``path=/api`` will have requests to ``/api/list``
+ trimmed to ``/list``. If this value is ``true``"" ,
+ the path will remain ``/api/list``. Note: this is not
+ applicable for Functions Components and is mutually
+ exclusive with ``rewrite``.
+ "rewrite": "str" #
+ Optional. An optional field that will rewrite the
+ path of the component to be what is specified here.
+ By default, the HTTP request path will be trimmed
+ from the left when forwarded to the component. For
+ example, a component with ``path=/api`` will have
+ requests to ``/api/list`` trimmed to ``/list``. If
+ you specified the rewrite to be ``/v1/``"" , requests
+ to ``/api/list`` would be rewritten to ``/v1/list``.
+ Note: this is mutually exclusive with
+ ``preserve_path_prefix``.
+ },
+ "cors": {
+ "allow_credentials":
+ bool, # Optional. Whether browsers should expose the
+ response to the client-side JavaScript code when the
+                                    request's credentials mode is include. This
+ configures the ``Access-Control-Allow-Credentials``
+ header.
+ "allow_headers": [
+ "str" #
+ Optional. The set of allowed HTTP request
+ headers. This configures the
+ ``Access-Control-Allow-Headers`` header.
+ ],
+ "allow_methods": [
+ "str" #
+ Optional. The set of allowed HTTP methods. This
+ configures the ``Access-Control-Allow-Methods``
+ header.
+ ],
+ "allow_origins": [
+ {
+ "exact": "str", # Optional. Exact string
+ match. Only 1 of ``exact``"" , ``prefix``"" ,
+ or ``regex`` must be set.
+ "prefix": "str", # Optional. Prefix-based
+ match. Only 1 of ``exact``"" , ``prefix``"" ,
+ or ``regex`` must be set.
+ "regex": "str" # Optional. RE2 style
+ regex-based match. Only 1 of ``exact``"" ,
+ ``prefix``"" , or ``regex`` must be set. For
+ more information about RE2 syntax, see:
+ https://github.com/google/re2/wiki/Syntax.
+ }
+ ],
+ "expose_headers": [
+ "str" #
+ Optional. The set of HTTP response headers that
+ browsers are allowed to access. This configures
+ the ``Access-Control-Expose-Headers`` header.
+ ],
+ "max_age": "str" #
+ Optional. An optional duration specifying how long
+ browsers can cache the results of a preflight
+ request. This configures the
+ ``Access-Control-Max-Age`` header.
+ },
+ "match": {
+ "authority": {
+ "exact":
+ "str" # Required.
+ },
+ "path": {
+ "prefix":
+ "str" # Prefix-based match. For example,
+ ``/api`` will match ``/api``"" , ``/api/``"" ,
+ and any nested paths such as
+ ``/api/v1/endpoint``. Required.
+ }
+ },
+ "redirect": {
+ "authority": "str",
+ # Optional. The authority/host to redirect to. This
+ can be a hostname or IP address. Note: use ``port``
+ to set the port.
+ "port": 0, #
+ Optional. The port to redirect to.
+ "redirect_code": 0,
+ # Optional. The redirect code to use. Defaults to
+ ``302``. Supported values are 300, 301, 302, 303,
+ 304, 307, 308.
+ "scheme": "str", #
+ Optional. The scheme to redirect to. Supported values
+ are ``http`` or ``https``. Default: ``https``.
+ "uri": "str" #
+ Optional. An optional URI path to redirect to. Note:
+ if this is specified the whole URI of the original
+ request will be overwritten to this value,
+ irrespective of the original request URI being
+ matched.
+ }
+ }
+ ]
+ },
+ "jobs": [
+ {
+ "autoscaling": {
+ "max_instance_count": 0, #
+ Optional. The maximum amount of instances for this
+ component. Must be more than min_instance_count.
+ "metrics": {
+ "cpu": {
+ "percent": 80
+ # Optional. Default value is 80. The average
+ target CPU utilization for the component.
+ }
+ },
+ "min_instance_count": 0 #
+ Optional. The minimum amount of instances for this
+ component. Must be less than max_instance_count.
+ },
+ "bitbucket": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "deploy_on_push": bool, #
+ Optional. Whether to automatically deploy new commits
+ made to the repo.
+ "repo": "str" # Optional.
+ The name of the repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
+ },
+ "build_command": "str", # Optional.
+ An optional build command to run while building this
+ component from source.
+ "dockerfile_path": "str", #
+ Optional. The path to the Dockerfile relative to the root of
+ the repo. If set, it will be used to build this component.
+ Otherwise, App Platform will attempt to build it using
+ buildpacks.
+ "environment_slug": "str", #
+ Optional. An environment slug describing the type of this
+ app. For a full list, please refer to `the product
+ documentation
+ `_.
+ "envs": [
+ {
+ "key": "str", # The
+ variable name. Required.
+ "scope":
+ "RUN_AND_BUILD_TIME", # Optional. Default value is
+ "RUN_AND_BUILD_TIME". * RUN_TIME: Made available only
+ at run-time * BUILD_TIME: Made available only at
+ build-time * RUN_AND_BUILD_TIME: Made available at
+ both build and run-time. Known values are: "UNSET",
+ "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME".
+ "type": "GENERAL", #
+ Optional. Default value is "GENERAL". * GENERAL: A
+ plain-text environment variable * SECRET: A secret
+ encrypted environment variable. Known values are:
+ "GENERAL" and "SECRET".
+ "value": "str" #
+ Optional. The value. If the type is ``SECRET``"" ,
+ the value will be encrypted on first submission. On
+ following submissions, the encrypted value should be
+ used.
+ }
+ ],
+ "git": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "repo_clone_url": "str" #
+ Optional. The clone URL of the repo. Example:
+ ``https://github.com/digitalocean/sample-golang.git``.
+ },
+ "github": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "deploy_on_push": bool, #
+ Optional. Whether to automatically deploy new commits
+ made to the repo.
+ "repo": "str" # Optional.
+ The name of the repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
+ },
+ "gitlab": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "deploy_on_push": bool, #
+ Optional. Whether to automatically deploy new commits
+ made to the repo.
+ "repo": "str" # Optional.
+ The name of the repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
+ },
+ "image": {
+ "deploy_on_push": {
+ "enabled": bool #
+ Optional. Whether to automatically deploy new images.
+ Can only be used for images hosted in DOCR and can
+ only be used with an image tag, not a specific
+ digest.
+ },
+ "digest": "str", # Optional.
+ The image digest. Cannot be specified if tag is provided.
+ "registry": "str", #
+ Optional. The registry name. Must be left empty for the
+ ``DOCR`` registry type.
+ "registry_credentials":
+ "str", # Optional. The credentials to be able to pull
+ the image. The value will be encrypted on first
+ submission. On following submissions, the encrypted value
+ should be used. * "$username:$access_token" for
+ registries of type ``DOCKER_HUB``. *
+ "$username:$access_token" for registries of type
+ ``GHCR``.
+ "registry_type": "str", #
+ Optional. * DOCKER_HUB: The DockerHub container registry
+ type. * DOCR: The DigitalOcean container registry type. *
+ GHCR: The Github container registry type. Known values
+ are: "DOCKER_HUB", "DOCR", and "GHCR".
+ "repository": "str", #
+ Optional. The repository name.
+ "tag": "latest" # Optional.
+ Default value is "latest". The repository tag. Defaults
+ to ``latest`` if not provided and no digest is provided.
+ Cannot be specified if digest is provided.
+ },
+ "instance_count": 1, # Optional.
+ Default value is 1. The amount of instances that this
+ component should be scaled to. Default: 1. Must not be set if
+ autoscaling is used.
+ "instance_size_slug": {},
+ "kind": "UNSPECIFIED", # Optional.
+ Default value is "UNSPECIFIED". * UNSPECIFIED: Default job
+ type, will auto-complete to POST_DEPLOY kind. * PRE_DEPLOY:
+ Indicates a job that runs before an app deployment. *
+ POST_DEPLOY: Indicates a job that runs after an app
+ deployment. * FAILED_DEPLOY: Indicates a job that runs after
+ a component fails to deploy. Known values are: "UNSPECIFIED",
+ "PRE_DEPLOY", "POST_DEPLOY", and "FAILED_DEPLOY".
+ "log_destinations": [
+ {
+ "name": "str", #
+ Required.
+ "datadog": {
+ "api_key":
+ "str", # Datadog API key. Required.
+ "endpoint":
+ "str" # Optional. Datadog HTTP log intake
+ endpoint.
+ },
+ "logtail": {
+ "token":
+ "str" # Optional. Logtail token.
+ },
+ "open_search": {
+ "basic_auth":
+ {
+ "password": "str", # Optional. Password for
+ user defined in User. Is required when
+ ``endpoint`` is set. Cannot be set if using a
+ DigitalOcean DBaaS OpenSearch cluster.
+ "user": "str" # Optional. Username to
+ authenticate with. Only required when
+ ``endpoint`` is set. Defaults to ``doadmin``
+ when ``cluster_name`` is set.
+ },
+ "cluster_name": "str", # Optional. The name of a
+ DigitalOcean DBaaS OpenSearch cluster to use as a
+ log forwarding destination. Cannot be specified
+ if ``endpoint`` is also specified.
+ "endpoint":
+ "str", # Optional. OpenSearch API Endpoint. Only
+ HTTPS is supported. Format:
+                        https://:code:`<host>`::code:`<port>`. Cannot be
+ specified if ``cluster_name`` is also specified.
+ "index_name":
+ "logs" # Optional. Default value is "logs". The
+ index name to use for the logs. If not set, the
+ default index name is "logs".
+ },
+ "papertrail": {
+ "endpoint":
+ "str" # Papertrail syslog endpoint. Required.
+ }
+ }
+ ],
+ "name": "str", # Optional. The name.
+ Must be unique across all components within the same app.
+ "run_command": "str", # Optional. An
+ optional run command to override the component's default.
+ "source_dir": "str", # Optional. An
+ optional path to the working directory to use for the build.
+ For Dockerfile builds, this will be used as the build
+ context. Must be relative to the root of the repo.
+ "termination": {
+ "grace_period_seconds": 0 #
+ Optional. The number of seconds to wait between sending a
+ TERM signal to a container and issuing a KILL which
+ causes immediate shutdown. (Default 120).
+ }
+ }
+ ],
+ "maintenance": {
+ "archive": bool, # Optional. Indicates
+ whether the app should be archived. Setting this to true implies
+ that enabled is set to true.
+ "enabled": bool, # Optional. Indicates
+ whether maintenance mode should be enabled for the app.
+ "offline_page_url": "str" # Optional. A
+ custom offline page to display when maintenance mode is enabled
+ or the app is archived.
+ },
+ "region": "str", # Optional. The slug form of the
+ geographical origin of the app. Default: ``nearest available``. Known
+ values are: "atl", "nyc", "sfo", "tor", "ams", "fra", "lon", "blr",
+ "sgp", and "syd".
+ "services": [
+ {
+ "autoscaling": {
+ "max_instance_count": 0, #
+ Optional. The maximum amount of instances for this
+ component. Must be more than min_instance_count.
+ "metrics": {
+ "cpu": {
+ "percent": 80
+ # Optional. Default value is 80. The average
+ target CPU utilization for the component.
+ }
+ },
+ "min_instance_count": 0 #
+ Optional. The minimum amount of instances for this
+ component. Must be less than max_instance_count.
+ },
+ "bitbucket": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "deploy_on_push": bool, #
+ Optional. Whether to automatically deploy new commits
+ made to the repo.
+ "repo": "str" # Optional.
+ The name of the repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
+ },
+ "build_command": "str", # Optional.
+ An optional build command to run while building this
+ component from source.
+ "cors": {
+ "allow_credentials": bool, #
+ Optional. Whether browsers should expose the response to
+                        the client-side JavaScript code when the request's
+ credentials mode is include. This configures the
+ ``Access-Control-Allow-Credentials`` header.
+ "allow_headers": [
+ "str" # Optional.
+ The set of allowed HTTP request headers. This
+ configures the ``Access-Control-Allow-Headers``
+ header.
+ ],
+ "allow_methods": [
+ "str" # Optional.
+ The set of allowed HTTP methods. This configures the
+ ``Access-Control-Allow-Methods`` header.
+ ],
+ "allow_origins": [
+ {
+ "exact":
+ "str", # Optional. Exact string match. Only 1 of
+ ``exact``"" , ``prefix``"" , or ``regex`` must be
+ set.
+ "prefix":
+ "str", # Optional. Prefix-based match. Only 1 of
+ ``exact``"" , ``prefix``"" , or ``regex`` must be
+ set.
+ "regex":
+ "str" # Optional. RE2 style regex-based match.
+ Only 1 of ``exact``"" , ``prefix``"" , or
+ ``regex`` must be set. For more information about
+ RE2 syntax, see:
+ https://github.com/google/re2/wiki/Syntax.
+ }
+ ],
+ "expose_headers": [
+ "str" # Optional.
+ The set of HTTP response headers that browsers are
+ allowed to access. This configures the
+ ``Access-Control-Expose-Headers`` header.
+ ],
+ "max_age": "str" # Optional.
+ An optional duration specifying how long browsers can
+ cache the results of a preflight request. This configures
+ the ``Access-Control-Max-Age`` header.
+ },
+ "dockerfile_path": "str", #
+ Optional. The path to the Dockerfile relative to the root of
+ the repo. If set, it will be used to build this component.
+ Otherwise, App Platform will attempt to build it using
+ buildpacks.
+ "environment_slug": "str", #
+ Optional. An environment slug describing the type of this
+ app. For a full list, please refer to `the product
+ documentation
+                        <https://docs.digitalocean.com/products/app-platform/>`_.
+ "envs": [
+ {
+ "key": "str", # The
+ variable name. Required.
+ "scope":
+ "RUN_AND_BUILD_TIME", # Optional. Default value is
+ "RUN_AND_BUILD_TIME". * RUN_TIME: Made available only
+ at run-time * BUILD_TIME: Made available only at
+ build-time * RUN_AND_BUILD_TIME: Made available at
+ both build and run-time. Known values are: "UNSET",
+ "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME".
+ "type": "GENERAL", #
+ Optional. Default value is "GENERAL". * GENERAL: A
+ plain-text environment variable * SECRET: A secret
+ encrypted environment variable. Known values are:
+ "GENERAL" and "SECRET".
+ "value": "str" #
+ Optional. The value. If the type is ``SECRET``"" ,
+ the value will be encrypted on first submission. On
+ following submissions, the encrypted value should be
+ used.
+ }
+ ],
+ "git": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "repo_clone_url": "str" #
+ Optional. The clone URL of the repo. Example:
+ ``https://github.com/digitalocean/sample-golang.git``.
+ },
+ "github": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "deploy_on_push": bool, #
+ Optional. Whether to automatically deploy new commits
+ made to the repo.
+ "repo": "str" # Optional.
+ The name of the repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
+ },
+ "gitlab": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "deploy_on_push": bool, #
+ Optional. Whether to automatically deploy new commits
+ made to the repo.
+ "repo": "str" # Optional.
+ The name of the repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
+ },
+ "health_check": {
+ "failure_threshold": 0, #
+ Optional. The number of failed health checks before
+ considered unhealthy.
+ "http_path": "str", #
+ Optional. The route path used for the HTTP health check
+ ping. If not set, the HTTP health check will be disabled
+ and a TCP health check used instead.
+ "initial_delay_seconds": 0,
+ # Optional. The number of seconds to wait before
+ beginning health checks.
+ "period_seconds": 0, #
+ Optional. The number of seconds to wait between health
+ checks.
+ "port": 0, # Optional. The
+ port on which the health check will be performed. If not
+ set, the health check will be performed on the
+ component's http_port.
+ "success_threshold": 0, #
+ Optional. The number of successful health checks before
+ considered healthy.
+ "timeout_seconds": 0 #
+ Optional. The number of seconds after which the check
+ times out.
+ },
+ "http_port": 0, # Optional. The
+ internal port on which this service's run command will
+ listen. Default: 8080 If there is not an environment variable
+ with the name ``PORT``"" , one will be automatically added
+ with its value set to the value of this field.
+ "image": {
+ "deploy_on_push": {
+ "enabled": bool #
+ Optional. Whether to automatically deploy new images.
+ Can only be used for images hosted in DOCR and can
+ only be used with an image tag, not a specific
+ digest.
+ },
+ "digest": "str", # Optional.
+ The image digest. Cannot be specified if tag is provided.
+ "registry": "str", #
+ Optional. The registry name. Must be left empty for the
+ ``DOCR`` registry type.
+ "registry_credentials":
+ "str", # Optional. The credentials to be able to pull
+ the image. The value will be encrypted on first
+ submission. On following submissions, the encrypted value
+ should be used. * "$username:$access_token" for
+ registries of type ``DOCKER_HUB``. *
+ "$username:$access_token" for registries of type
+ ``GHCR``.
+ "registry_type": "str", #
+ Optional. * DOCKER_HUB: The DockerHub container registry
+ type. * DOCR: The DigitalOcean container registry type. *
+ GHCR: The Github container registry type. Known values
+ are: "DOCKER_HUB", "DOCR", and "GHCR".
+ "repository": "str", #
+ Optional. The repository name.
+ "tag": "latest" # Optional.
+ Default value is "latest". The repository tag. Defaults
+ to ``latest`` if not provided and no digest is provided.
+ Cannot be specified if digest is provided.
+ },
+ "instance_count": 1, # Optional.
+ Default value is 1. The amount of instances that this
+ component should be scaled to. Default: 1. Must not be set if
+ autoscaling is used.
+ "instance_size_slug": {},
+ "internal_ports": [
+ 0 # Optional. The ports on
+ which this service will listen for internal traffic.
+ ],
+ "liveness_health_check": {
+ "failure_threshold": 0, #
+ Optional. The number of failed health checks before
+ considered unhealthy.
+ "http_path": "str", #
+ Optional. The route path used for the HTTP health check
+ ping. If not set, the HTTP health check will be disabled
+ and a TCP health check used instead.
+ "initial_delay_seconds": 0,
+ # Optional. The number of seconds to wait before
+ beginning health checks.
+ "period_seconds": 0, #
+ Optional. The number of seconds to wait between health
+ checks.
+ "port": 0, # Optional. The
+ port on which the health check will be performed.
+ "success_threshold": 0, #
+ Optional. The number of successful health checks before
+ considered healthy.
+ "timeout_seconds": 0 #
+ Optional. The number of seconds after which the check
+ times out.
+ },
+ "log_destinations": [
+ {
+ "name": "str", #
+ Required.
+ "datadog": {
+ "api_key":
+ "str", # Datadog API key. Required.
+ "endpoint":
+ "str" # Optional. Datadog HTTP log intake
+ endpoint.
+ },
+ "logtail": {
+ "token":
+ "str" # Optional. Logtail token.
+ },
+ "open_search": {
+ "basic_auth":
+ {
+ "password": "str", # Optional. Password for
+ user defined in User. Is required when
+ ``endpoint`` is set. Cannot be set if using a
+ DigitalOcean DBaaS OpenSearch cluster.
+ "user": "str" # Optional. Username to
+ authenticate with. Only required when
+ ``endpoint`` is set. Defaults to ``doadmin``
+ when ``cluster_name`` is set.
+ },
+ "cluster_name": "str", # Optional. The name of a
+ DigitalOcean DBaaS OpenSearch cluster to use as a
+ log forwarding destination. Cannot be specified
+ if ``endpoint`` is also specified.
+ "endpoint":
+ "str", # Optional. OpenSearch API Endpoint. Only
+ HTTPS is supported. Format:
+                                https://:code:`<host>`::code:`<port>`. Cannot be
+ specified if ``cluster_name`` is also specified.
+ "index_name":
+ "logs" # Optional. Default value is "logs". The
+ index name to use for the logs. If not set, the
+ default index name is "logs".
+ },
+ "papertrail": {
+ "endpoint":
+ "str" # Papertrail syslog endpoint. Required.
+ }
+ }
+ ],
+ "name": "str", # Optional. The name.
+ Must be unique across all components within the same app.
+ "protocol": "str", # Optional. The
+ protocol which the service uses to serve traffic on the
+ http_port. * ``HTTP``"" : The app is serving the HTTP
+ protocol. Default. * ``HTTP2``"" : The app is serving the
+ HTTP/2 protocol. Currently, this needs to be implemented in
+ the service by serving HTTP/2 cleartext (h2c). Known values
+ are: "HTTP" and "HTTP2".
+ "routes": [
+ {
+ "path": "str", #
+ Optional. (Deprecated - Use Ingress Rules instead).
+ An HTTP path prefix. Paths must start with / and must
+ be unique across all components within an app.
+ "preserve_path_prefix": bool # Optional. An optional
+ flag to preserve the path that is forwarded to the
+ backend service. By default, the HTTP request path
+ will be trimmed from the left when forwarded to the
+ component. For example, a component with
+ ``path=/api`` will have requests to ``/api/list``
+ trimmed to ``/list``. If this value is ``true``"" ,
+ the path will remain ``/api/list``.
+ }
+ ],
+ "run_command": "str", # Optional. An
+ optional run command to override the component's default.
+ "source_dir": "str", # Optional. An
+ optional path to the working directory to use for the build.
+ For Dockerfile builds, this will be used as the build
+ context. Must be relative to the root of the repo.
+ "termination": {
+ "drain_seconds": 0, #
+ Optional. The number of seconds to wait between selecting
+ a container instance for termination and issuing the TERM
+ signal. Selecting a container instance for termination
+ begins an asynchronous drain of new requests on upstream
+ load-balancers. (Default 15).
+ "grace_period_seconds": 0 #
+ Optional. The number of seconds to wait between sending a
+ TERM signal to a container and issuing a KILL which
+ causes immediate shutdown. (Default 120).
+ }
+ }
+ ],
+ "static_sites": [
+ {
+ "bitbucket": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "deploy_on_push": bool, #
+ Optional. Whether to automatically deploy new commits
+ made to the repo.
+ "repo": "str" # Optional.
+ The name of the repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
+ },
+ "build_command": "str", # Optional.
+ An optional build command to run while building this
+ component from source.
+ "catchall_document": "str", #
+ Optional. The name of the document to use as the fallback for
+ any requests to documents that are not found when serving
+ this static site. Only 1 of ``catchall_document`` or
+ ``error_document`` can be set.
+ "cors": {
+ "allow_credentials": bool, #
+ Optional. Whether browsers should expose the response to
+                        the client-side JavaScript code when the request's
+ credentials mode is include. This configures the
+ ``Access-Control-Allow-Credentials`` header.
+ "allow_headers": [
+ "str" # Optional.
+ The set of allowed HTTP request headers. This
+ configures the ``Access-Control-Allow-Headers``
+ header.
+ ],
+ "allow_methods": [
+ "str" # Optional.
+ The set of allowed HTTP methods. This configures the
+ ``Access-Control-Allow-Methods`` header.
+ ],
+ "allow_origins": [
+ {
+ "exact":
+ "str", # Optional. Exact string match. Only 1 of
+ ``exact``"" , ``prefix``"" , or ``regex`` must be
+ set.
+ "prefix":
+ "str", # Optional. Prefix-based match. Only 1 of
+ ``exact``"" , ``prefix``"" , or ``regex`` must be
+ set.
+ "regex":
+ "str" # Optional. RE2 style regex-based match.
+ Only 1 of ``exact``"" , ``prefix``"" , or
+ ``regex`` must be set. For more information about
+ RE2 syntax, see:
+ https://github.com/google/re2/wiki/Syntax.
+ }
+ ],
+ "expose_headers": [
+ "str" # Optional.
+ The set of HTTP response headers that browsers are
+ allowed to access. This configures the
+ ``Access-Control-Expose-Headers`` header.
+ ],
+ "max_age": "str" # Optional.
+ An optional duration specifying how long browsers can
+ cache the results of a preflight request. This configures
+ the ``Access-Control-Max-Age`` header.
+ },
+ "dockerfile_path": "str", #
+ Optional. The path to the Dockerfile relative to the root of
+ the repo. If set, it will be used to build this component.
+ Otherwise, App Platform will attempt to build it using
+ buildpacks.
+ "environment_slug": "str", #
+ Optional. An environment slug describing the type of this
+ app. For a full list, please refer to `the product
+ documentation
+                        <https://docs.digitalocean.com/products/app-platform/>`_.
+ "envs": [
+ {
+ "key": "str", # The
+ variable name. Required.
+ "scope":
+ "RUN_AND_BUILD_TIME", # Optional. Default value is
+ "RUN_AND_BUILD_TIME". * RUN_TIME: Made available only
+ at run-time * BUILD_TIME: Made available only at
+ build-time * RUN_AND_BUILD_TIME: Made available at
+ both build and run-time. Known values are: "UNSET",
+ "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME".
+ "type": "GENERAL", #
+ Optional. Default value is "GENERAL". * GENERAL: A
+ plain-text environment variable * SECRET: A secret
+ encrypted environment variable. Known values are:
+ "GENERAL" and "SECRET".
+ "value": "str" #
+ Optional. The value. If the type is ``SECRET``"" ,
+ the value will be encrypted on first submission. On
+ following submissions, the encrypted value should be
+ used.
+ }
+ ],
+ "error_document": "404.html", #
+ Optional. Default value is "404.html". The name of the error
+ document to use when serving this static site. Default:
+ 404.html. If no such file exists within the built assets, App
+ Platform will supply one.
+ "git": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "repo_clone_url": "str" #
+ Optional. The clone URL of the repo. Example:
+ ``https://github.com/digitalocean/sample-golang.git``.
+ },
+ "github": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "deploy_on_push": bool, #
+ Optional. Whether to automatically deploy new commits
+ made to the repo.
+ "repo": "str" # Optional.
+ The name of the repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
+ },
+ "gitlab": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "deploy_on_push": bool, #
+ Optional. Whether to automatically deploy new commits
+ made to the repo.
+ "repo": "str" # Optional.
+ The name of the repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
+ },
+ "image": {
+ "deploy_on_push": {
+ "enabled": bool #
+ Optional. Whether to automatically deploy new images.
+ Can only be used for images hosted in DOCR and can
+ only be used with an image tag, not a specific
+ digest.
+ },
+ "digest": "str", # Optional.
+ The image digest. Cannot be specified if tag is provided.
+ "registry": "str", #
+ Optional. The registry name. Must be left empty for the
+ ``DOCR`` registry type.
+ "registry_credentials":
+ "str", # Optional. The credentials to be able to pull
+ the image. The value will be encrypted on first
+ submission. On following submissions, the encrypted value
+ should be used. * "$username:$access_token" for
+ registries of type ``DOCKER_HUB``. *
+ "$username:$access_token" for registries of type
+ ``GHCR``.
+ "registry_type": "str", #
+ Optional. * DOCKER_HUB: The DockerHub container registry
+ type. * DOCR: The DigitalOcean container registry type. *
+ GHCR: The Github container registry type. Known values
+ are: "DOCKER_HUB", "DOCR", and "GHCR".
+ "repository": "str", #
+ Optional. The repository name.
+ "tag": "latest" # Optional.
+ Default value is "latest". The repository tag. Defaults
+ to ``latest`` if not provided and no digest is provided.
+ Cannot be specified if digest is provided.
+ },
+ "index_document": "index.html", #
+ Optional. Default value is "index.html". The name of the
+ index document to use when serving this static site. Default:
+ index.html.
+ "log_destinations": [
+ {
+ "name": "str", #
+ Required.
+ "datadog": {
+ "api_key":
+ "str", # Datadog API key. Required.
+ "endpoint":
+ "str" # Optional. Datadog HTTP log intake
+ endpoint.
+ },
+ "logtail": {
+ "token":
+ "str" # Optional. Logtail token.
+ },
+ "open_search": {
+ "basic_auth":
+ {
+ "password": "str", # Optional. Password for
+ user defined in User. Is required when
+ ``endpoint`` is set. Cannot be set if using a
+ DigitalOcean DBaaS OpenSearch cluster.
+ "user": "str" # Optional. Username to
+ authenticate with. Only required when
+ ``endpoint`` is set. Defaults to ``doadmin``
+ when ``cluster_name`` is set.
+ },
+ "cluster_name": "str", # Optional. The name of a
+ DigitalOcean DBaaS OpenSearch cluster to use as a
+ log forwarding destination. Cannot be specified
+ if ``endpoint`` is also specified.
+ "endpoint":
+ "str", # Optional. OpenSearch API Endpoint. Only
+ HTTPS is supported. Format:
+                                https://:code:`<host>`::code:`<port>`. Cannot be
+ specified if ``cluster_name`` is also specified.
+ "index_name":
+ "logs" # Optional. Default value is "logs". The
+ index name to use for the logs. If not set, the
+ default index name is "logs".
+ },
+ "papertrail": {
+ "endpoint":
+ "str" # Papertrail syslog endpoint. Required.
+ }
+ }
+ ],
+ "name": "str", # Optional. The name.
+ Must be unique across all components within the same app.
+ "output_dir": "str", # Optional. An
+ optional path to where the built assets will be located,
+ relative to the build context. If not set, App Platform will
+ automatically scan for these directory names: ``_static``"" ,
+ ``dist``"" , ``public``"" , ``build``.
+ "routes": [
+ {
+ "path": "str", #
+ Optional. (Deprecated - Use Ingress Rules instead).
+ An HTTP path prefix. Paths must start with / and must
+ be unique across all components within an app.
+ "preserve_path_prefix": bool # Optional. An optional
+ flag to preserve the path that is forwarded to the
+ backend service. By default, the HTTP request path
+ will be trimmed from the left when forwarded to the
+ component. For example, a component with
+ ``path=/api`` will have requests to ``/api/list``
+ trimmed to ``/list``. If this value is ``true``"" ,
+ the path will remain ``/api/list``.
+ }
+ ],
+ "run_command": "str", # Optional. An
+ optional run command to override the component's default.
+ "source_dir": "str" # Optional. An
+ optional path to the working directory to use for the build.
+ For Dockerfile builds, this will be used as the build
+ context. Must be relative to the root of the repo.
+ }
+ ],
+ "vpc": {
+ "egress_ips": [
+ {
+ "ip": "str" # Optional. The
+ egress ips associated with the VPC.
+ }
+ ],
+ "id": "str" # Optional. The ID of the VPC.
+ },
+ "workers": [
+ {
+ "autoscaling": {
+ "max_instance_count": 0, #
+ Optional. The maximum amount of instances for this
+ component. Must be more than min_instance_count.
+ "metrics": {
+ "cpu": {
+ "percent": 80
+ # Optional. Default value is 80. The average
+ target CPU utilization for the component.
+ }
+ },
+ "min_instance_count": 0 #
+ Optional. The minimum amount of instances for this
+ component. Must be less than max_instance_count.
+ },
+ "bitbucket": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "deploy_on_push": bool, #
+ Optional. Whether to automatically deploy new commits
+ made to the repo.
+ "repo": "str" # Optional.
+ The name of the repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
+ },
+ "build_command": "str", # Optional.
+ An optional build command to run while building this
+ component from source.
+ "dockerfile_path": "str", #
+ Optional. The path to the Dockerfile relative to the root of
+ the repo. If set, it will be used to build this component.
+ Otherwise, App Platform will attempt to build it using
+ buildpacks.
+ "environment_slug": "str", #
+ Optional. An environment slug describing the type of this
+ app. For a full list, please refer to `the product
+ documentation
+                        <https://docs.digitalocean.com/products/app-platform/>`_.
+ "envs": [
+ {
+ "key": "str", # The
+ variable name. Required.
+ "scope":
+ "RUN_AND_BUILD_TIME", # Optional. Default value is
+ "RUN_AND_BUILD_TIME". * RUN_TIME: Made available only
+ at run-time * BUILD_TIME: Made available only at
+ build-time * RUN_AND_BUILD_TIME: Made available at
+ both build and run-time. Known values are: "UNSET",
+ "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME".
+ "type": "GENERAL", #
+ Optional. Default value is "GENERAL". * GENERAL: A
+ plain-text environment variable * SECRET: A secret
+ encrypted environment variable. Known values are:
+ "GENERAL" and "SECRET".
+ "value": "str" #
+ Optional. The value. If the type is ``SECRET``"" ,
+ the value will be encrypted on first submission. On
+ following submissions, the encrypted value should be
+ used.
+ }
+ ],
+ "git": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "repo_clone_url": "str" #
+ Optional. The clone URL of the repo. Example:
+ ``https://github.com/digitalocean/sample-golang.git``.
+ },
+ "github": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "deploy_on_push": bool, #
+ Optional. Whether to automatically deploy new commits
+ made to the repo.
+ "repo": "str" # Optional.
+ The name of the repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
+ },
+ "gitlab": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "deploy_on_push": bool, #
+ Optional. Whether to automatically deploy new commits
+ made to the repo.
+ "repo": "str" # Optional.
+ The name of the repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
+ },
+ "image": {
+ "deploy_on_push": {
+ "enabled": bool #
+ Optional. Whether to automatically deploy new images.
+ Can only be used for images hosted in DOCR and can
+ only be used with an image tag, not a specific
+ digest.
+ },
+ "digest": "str", # Optional.
+ The image digest. Cannot be specified if tag is provided.
+ "registry": "str", #
+ Optional. The registry name. Must be left empty for the
+ ``DOCR`` registry type.
+ "registry_credentials":
+ "str", # Optional. The credentials to be able to pull
+ the image. The value will be encrypted on first
+ submission. On following submissions, the encrypted value
+ should be used. * "$username:$access_token" for
+ registries of type ``DOCKER_HUB``. *
+ "$username:$access_token" for registries of type
+ ``GHCR``.
+ "registry_type": "str", #
+ Optional. * DOCKER_HUB: The DockerHub container registry
+ type. * DOCR: The DigitalOcean container registry type. *
+ GHCR: The Github container registry type. Known values
+ are: "DOCKER_HUB", "DOCR", and "GHCR".
+ "repository": "str", #
+ Optional. The repository name.
+ "tag": "latest" # Optional.
+ Default value is "latest". The repository tag. Defaults
+ to ``latest`` if not provided and no digest is provided.
+ Cannot be specified if digest is provided.
+ },
+ "instance_count": 1, # Optional.
+ Default value is 1. The amount of instances that this
+ component should be scaled to. Default: 1. Must not be set if
+ autoscaling is used.
+ "instance_size_slug": {},
+ "liveness_health_check": {
+ "failure_threshold": 0, #
+ Optional. The number of failed health checks before
+ considered unhealthy.
+ "http_path": "str", #
+ Optional. The route path used for the HTTP health check
+ ping. If not set, the HTTP health check will be disabled
+ and a TCP health check used instead.
+ "initial_delay_seconds": 0,
+ # Optional. The number of seconds to wait before
+ beginning health checks.
+ "period_seconds": 0, #
+ Optional. The number of seconds to wait between health
+ checks.
+ "port": 0, # Optional. The
+ port on which the health check will be performed.
+ "success_threshold": 0, #
+ Optional. The number of successful health checks before
+ considered healthy.
+ "timeout_seconds": 0 #
+ Optional. The number of seconds after which the check
+ times out.
+ },
+ "log_destinations": [
+ {
+ "name": "str", #
+ Required.
+ "datadog": {
+ "api_key":
+ "str", # Datadog API key. Required.
+ "endpoint":
+ "str" # Optional. Datadog HTTP log intake
+ endpoint.
+ },
+ "logtail": {
+ "token":
+ "str" # Optional. Logtail token.
+ },
+ "open_search": {
+ "basic_auth":
+ {
+ "password": "str", # Optional. Password for
+ user defined in User. Is required when
+ ``endpoint`` is set. Cannot be set if using a
+ DigitalOcean DBaaS OpenSearch cluster.
+ "user": "str" # Optional. Username to
+ authenticate with. Only required when
+ ``endpoint`` is set. Defaults to ``doadmin``
+ when ``cluster_name`` is set.
+ },
+ "cluster_name": "str", # Optional. The name of a
+ DigitalOcean DBaaS OpenSearch cluster to use as a
+ log forwarding destination. Cannot be specified
+ if ``endpoint`` is also specified.
+ "endpoint":
+ "str", # Optional. OpenSearch API Endpoint. Only
+ HTTPS is supported. Format:
+                                https://:code:`<host>`::code:`<port>`. Cannot be
+ specified if ``cluster_name`` is also specified.
+ "index_name":
+ "logs" # Optional. Default value is "logs". The
+ index name to use for the logs. If not set, the
+ default index name is "logs".
+ },
+ "papertrail": {
+ "endpoint":
+ "str" # Papertrail syslog endpoint. Required.
+ }
+ }
+ ],
+ "name": "str", # Optional. The name.
+ Must be unique across all components within the same app.
+ "run_command": "str", # Optional. An
+ optional run command to override the component's default.
+ "source_dir": "str", # Optional. An
+ optional path to the working directory to use for the build.
+ For Dockerfile builds, this will be used as the build
+ context. Must be relative to the root of the repo.
+ "termination": {
+ "grace_period_seconds": 0 #
+ Optional. The number of seconds to wait between sending a
+ TERM signal to a container and issuing a KILL which
+ causes immediate shutdown. (Default 120).
+ }
+ }
+ ]
+ },
+ "static_sites": [
+ {
+ "name": "str", # Optional. The name of this
+ static site.
+ "source_commit_hash": "str" # Optional. The
+ commit hash of the repository that was used to build this static
+ site.
+ }
+ ],
+ "tier_slug": "str", # Optional. The current pricing tier
+ slug of the deployment.
+ "updated_at": "2020-02-20 00:00:00", # Optional. When the
+ deployment was last updated.
+ "workers": [
+ {
+ "name": "str", # Optional. The name of this
+ worker.
+ "source_commit_hash": "str" # Optional. The
+ commit hash of the repository that was used to build this worker.
+ }
+ ]
+ },
+ "deployment_id": "str", # Optional. For deployment events, this is
+ the same as the deployment's ID. For autoscaling events, this is the
+ deployment that was autoscaled.
+ "id": "str", # Optional. The ID of the event (UUID).
+ "type": "str" # Optional. The type of event. Known values are:
+ "UNKNOWN", "DEPLOYMENT", and "AUTOSCALING".
+ }
+ }
+ # response body for status code(s): 404
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
+ """
+ error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ 401: cast(
+ Type[HttpResponseError],
+ lambda response: ClientAuthenticationError(response=response),
+ ),
+ 429: HttpResponseError,
+ 500: HttpResponseError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = kwargs.pop("params", {}) or {}
+
+ cls: ClsType[JSON] = kwargs.pop("cls", None)
+
+ _request = build_apps_get_event_request(
+ app_id=app_id,
+ event_id=event_id,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = (
+ await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 404]:
+ if _stream:
+ await response.read() # Load the body in memory and close the socket
+ map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
+ raise HttpResponseError(response=response)
+
+ response_headers = {}
+ if response.status_code == 200:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if response.status_code == 404:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if cls:
+ return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
+
+ return cast(JSON, deserialized) # type: ignore
+
+ @distributed_trace_async
+ async def cancel_event(self, app_id: str, event_id: str, **kwargs: Any) -> JSON:
+ # pylint: disable=line-too-long
+ """Cancel an Event.
+
+ Cancel an in-progress autoscaling event.
+
+ :param app_id: The app ID. Required.
+ :type app_id: str
+ :param event_id: The event ID. Required.
+ :type event_id: str
+ :return: JSON object
+ :rtype: JSON
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # response body for status code(s): 200
+ response == {
+ "event": {
+ "autoscaling": {
+ "components": {
+ "str": {
+ "from": 0, # Optional. The number of
+ replicas before scaling.
+ "to": 0, # Optional. The number of replicas
+ after scaling.
+ "triggering_metric": "str" # Optional. The
+ metric that triggered the scale change. Known values are "cpu",
+ "requests_per_second", "request_duration". For inactivity sleep,
+ "scale_from_zero" and "scale_to_zero" are used.
+ }
+ },
+ "phase": "str" # Optional. The current phase of the
+ autoscaling event. Known values are: "UNKNOWN", "PENDING", "IN_PROGRESS",
+ "SUCCEEDED", "FAILED", and "CANCELED".
+ },
+ "created_at": "2020-02-20 00:00:00", # Optional. When the event was
+ created.
+ "deployment": {
+ "cause": "str", # Optional. What caused this deployment to
+ be created.
+ "cloned_from": "str", # Optional. The ID of a previous
+ deployment that this deployment was cloned from.
+ "created_at": "2020-02-20 00:00:00", # Optional. The
+ creation time of the deployment.
+ "functions": [
+ {
+ "name": "str", # Optional. The name of this
+ functions component.
+ "namespace": "str", # Optional. The
+ namespace where the functions are deployed.
+ "source_commit_hash": "str" # Optional. The
+ commit hash of the repository that was used to build this
+ functions component.
+ }
+ ],
+ "id": "str", # Optional. The ID of the deployment.
+ "jobs": [
+ {
+ "name": "str", # Optional. The name of this
+ job.
+ "source_commit_hash": "str" # Optional. The
+ commit hash of the repository that was used to build this job.
+ }
+ ],
+ "phase": "UNKNOWN", # Optional. Default value is "UNKNOWN".
+ Known values are: "UNKNOWN", "PENDING_BUILD", "BUILDING",
+ "PENDING_DEPLOY", "DEPLOYING", "ACTIVE", "SUPERSEDED", "ERROR", and
+ "CANCELED".
+ "phase_last_updated_at": "2020-02-20 00:00:00", # Optional.
+ When the deployment phase was last updated.
+ "progress": {
+ "error_steps": 0, # Optional. Number of unsuccessful
+ steps.
+ "pending_steps": 0, # Optional. Number of pending
+ steps.
+ "running_steps": 0, # Optional. Number of currently
+ running steps.
+ "steps": [
+ {
+ "component_name": "str", # Optional.
+ The component name that this step is associated with.
+ "ended_at": "2020-02-20 00:00:00", #
+ Optional. The end time of this step.
+ "message_base": "str", # Optional.
+ The base of a human-readable description of the step intended
+ to be combined with the component name for presentation. For
+ example: ``message_base`` = "Building service"
+ ``component_name`` = "api".
+ "name": "str", # Optional. The name
+ of this step.
+ "reason": {
+ "code": "str", # Optional.
+ The error code.
+ "message": "str" # Optional.
+ The error message.
+ },
+ "started_at": "2020-02-20 00:00:00",
+ # Optional. The start time of this step.
+ "status": "UNKNOWN", # Optional.
+ Default value is "UNKNOWN". Known values are: "UNKNOWN",
+ "PENDING", "RUNNING", "ERROR", and "SUCCESS".
+ "steps": [
+ {} # Optional. Child steps
+ of this step.
+ ]
+ }
+ ],
+ "success_steps": 0, # Optional. Number of successful
+ steps.
+ "summary_steps": [
+ {
+ "component_name": "str", # Optional.
+ The component name that this step is associated with.
+ "ended_at": "2020-02-20 00:00:00", #
+ Optional. The end time of this step.
+ "message_base": "str", # Optional.
+ The base of a human-readable description of the step intended
+ to be combined with the component name for presentation. For
+ example: ``message_base`` = "Building service"
+ ``component_name`` = "api".
+ "name": "str", # Optional. The name
+ of this step.
+ "reason": {
+ "code": "str", # Optional.
+ The error code.
+ "message": "str" # Optional.
+ The error message.
+ },
+ "started_at": "2020-02-20 00:00:00",
+ # Optional. The start time of this step.
+ "status": "UNKNOWN", # Optional.
+ Default value is "UNKNOWN". Known values are: "UNKNOWN",
+ "PENDING", "RUNNING", "ERROR", and "SUCCESS".
+ "steps": [
+ {} # Optional. Child steps
+ of this step.
+ ]
+ }
+ ],
+ "total_steps": 0 # Optional. Total number of steps.
+ },
+ "services": [
+ {
+ "name": "str", # Optional. The name of this
+ service.
+ "source_commit_hash": "str" # Optional. The
+ commit hash of the repository that was used to build this
+ service.
+ }
+ ],
+ "spec": {
+ "name": "str", # The name of the app. Must be unique
+ across all apps in the same account. Required.
+ "databases": [
+ {
+ "name": "str", # The database's
+ name. The name must be unique across all components within
+ the same app and cannot use capital letters. Required.
+ "cluster_name": "str", # Optional.
+ The name of the underlying DigitalOcean DBaaS cluster. This
+ is required for production databases. For dev databases, if
+ cluster_name is not set, a new cluster will be provisioned.
+ "db_name": "str", # Optional. The
+ name of the MySQL or PostgreSQL database to configure.
+ "db_user": "str", # Optional. The
+ name of the MySQL or PostgreSQL user to configure.
+ "engine": "UNSET", # Optional.
+ Default value is "UNSET". * MYSQL: MySQL * PG: PostgreSQL *
+ REDIS: Caching * MONGODB: MongoDB * KAFKA: Kafka *
+ OPENSEARCH: OpenSearch * VALKEY: ValKey. Known values are:
+ "UNSET", "MYSQL", "PG", "REDIS", "MONGODB", "KAFKA",
+ "OPENSEARCH", and "VALKEY".
+ "production": bool, # Optional.
+ Whether this is a production or dev database.
+ "version": "str" # Optional. The
+ version of the database engine.
+ }
+ ],
+ "disable_edge_cache": False, # Optional. Default
+ value is False. .. role:: raw-html-m2r(raw) :format: html If set
+ to ``true``"" , the app will **not** be cached at the edge (CDN).
+ Enable this option if you want to manage CDN configuration
+ yourself"u2014whether by using an external CDN provider or by
+ handling static content and caching within your app. This setting is
+ also recommended for apps that require real-time data or serve
+ dynamic content, such as those using Server-Sent Events (SSE) over
+ GET, or hosting an MCP (Model Context Protocol) Server that utilizes
+ SSE."" :raw-html-m2r:`
` **Note:** This feature is not available
+ for static site components."" :raw-html-m2r:`
` For more
+ information, see `Disable CDN Cache
+ `_.
+ "disable_email_obfuscation": False, # Optional.
+ Default value is False. If set to ``true``"" , email addresses in the
+ app will not be obfuscated. This is useful for apps that require
+ email addresses to be visible (in the HTML markup).
+ "domains": [
+ {
+ "domain": "str", # The hostname for
+ the domain. Required.
+ "minimum_tls_version": "str", #
+ Optional. The minimum version of TLS a client application can
+ use to access resources for the domain. Must be one of the
+ following values wrapped within quotations: ``"1.2"`` or
+ ``"1.3"``. Known values are: "1.2" and "1.3".
+ "type": "UNSPECIFIED", # Optional.
+ Default value is "UNSPECIFIED". * DEFAULT: The default
+ ``.ondigitalocean.app`` domain assigned to this app *
+ PRIMARY: The primary domain for this app that is displayed as
+ the default in the control panel, used in bindable
+ environment variables, and any other places that reference an
+ app's live URL. Only one domain may be set as primary. *
+ ALIAS: A non-primary domain. Known values are: "UNSPECIFIED",
+ "DEFAULT", "PRIMARY", and "ALIAS".
+ "wildcard": bool, # Optional.
+ Indicates whether the domain includes all sub-domains, in
+ addition to the given domain.
+ "zone": "str" # Optional. Optional.
+ If the domain uses DigitalOcean DNS and you would like App
+ Platform to automatically manage it for you, set this to the
+ name of the domain on your account. For example, If the
+ domain you are adding is ``app.domain.com``"" , the zone
+ could be ``domain.com``.
+ }
+ ],
+ "egress": {
+ "type": "AUTOASSIGN" # Optional. Default
+ value is "AUTOASSIGN". The app egress type. Known values are:
+ "AUTOASSIGN" and "DEDICATED_IP".
+ },
+ "enhanced_threat_control_enabled": False, #
+ Optional. Default value is False. If set to ``true``"" , suspicious
+ requests will go through additional security checks to help mitigate
+ layer 7 DDoS attacks.
+ "functions": [
+ {
+ "name": "str", # The name. Must be
+ unique across all components within the same app. Required.
+ "alerts": [
+ {
+ "disabled": bool, #
+ Optional. Is the alert disabled?.
+ "operator":
+ "UNSPECIFIED_OPERATOR", # Optional. Default value is
+ "UNSPECIFIED_OPERATOR". Known values are:
+ "UNSPECIFIED_OPERATOR", "GREATER_THAN", and
+ "LESS_THAN".
+ "rule":
+ "UNSPECIFIED_RULE", # Optional. Default value is
+ "UNSPECIFIED_RULE". Known values are:
+ "UNSPECIFIED_RULE", "CPU_UTILIZATION",
+ "MEM_UTILIZATION", "RESTART_COUNT",
+ "DEPLOYMENT_FAILED", "DEPLOYMENT_LIVE",
+ "DOMAIN_FAILED", "DOMAIN_LIVE", "AUTOSCALE_FAILED",
+ "AUTOSCALE_SUCCEEDED", "FUNCTIONS_ACTIVATION_COUNT",
+ "FUNCTIONS_AVERAGE_DURATION_MS",
+ "FUNCTIONS_ERROR_RATE_PER_MINUTE",
+ "FUNCTIONS_AVERAGE_WAIT_TIME_MS",
+ "FUNCTIONS_ERROR_COUNT", and
+ "FUNCTIONS_GB_RATE_PER_SECOND".
+ "value": 0.0, #
+ Optional. Threshold value for alert.
+ "window":
+ "UNSPECIFIED_WINDOW" # Optional. Default value is
+ "UNSPECIFIED_WINDOW". Known values are:
+ "UNSPECIFIED_WINDOW", "FIVE_MINUTES", "TEN_MINUTES",
+ "THIRTY_MINUTES", and "ONE_HOUR".
+ }
+ ],
+ "bitbucket": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "deploy_on_push": bool, #
+ Optional. Whether to automatically deploy new commits
+ made to the repo.
+ "repo": "str" # Optional.
+ The name of the repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
+ },
+ "cors": {
+ "allow_credentials": bool, #
+ Optional. Whether browsers should expose the response to
+ the client-side JavaScript code when the request"u2019s
+ credentials mode is include. This configures the
+ ``Access-Control-Allow-Credentials`` header.
+ "allow_headers": [
+ "str" # Optional.
+ The set of allowed HTTP request headers. This
+ configures the ``Access-Control-Allow-Headers``
+ header.
+ ],
+ "allow_methods": [
+ "str" # Optional.
+ The set of allowed HTTP methods. This configures the
+ ``Access-Control-Allow-Methods`` header.
+ ],
+ "allow_origins": [
+ {
+ "exact":
+ "str", # Optional. Exact string match. Only 1 of
+ ``exact``"" , ``prefix``"" , or ``regex`` must be
+ set.
+ "prefix":
+ "str", # Optional. Prefix-based match. Only 1 of
+ ``exact``"" , ``prefix``"" , or ``regex`` must be
+ set.
+ "regex":
+ "str" # Optional. RE2 style regex-based match.
+ Only 1 of ``exact``"" , ``prefix``"" , or
+ ``regex`` must be set. For more information about
+ RE2 syntax, see:
+ https://github.com/google/re2/wiki/Syntax.
+ }
+ ],
+ "expose_headers": [
+ "str" # Optional.
+ The set of HTTP response headers that browsers are
+ allowed to access. This configures the
+ ``Access-Control-Expose-Headers`` header.
+ ],
+ "max_age": "str" # Optional.
+ An optional duration specifying how long browsers can
+ cache the results of a preflight request. This configures
+ the ``Access-Control-Max-Age`` header.
+ },
+ "envs": [
+ {
+ "key": "str", # The
+ variable name. Required.
+ "scope":
+ "RUN_AND_BUILD_TIME", # Optional. Default value is
+ "RUN_AND_BUILD_TIME". * RUN_TIME: Made available only
+ at run-time * BUILD_TIME: Made available only at
+ build-time * RUN_AND_BUILD_TIME: Made available at
+ both build and run-time. Known values are: "UNSET",
+ "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME".
+ "type": "GENERAL", #
+ Optional. Default value is "GENERAL". * GENERAL: A
+ plain-text environment variable * SECRET: A secret
+ encrypted environment variable. Known values are:
+ "GENERAL" and "SECRET".
+ "value": "str" #
+ Optional. The value. If the type is ``SECRET``"" ,
+ the value will be encrypted on first submission. On
+ following submissions, the encrypted value should be
+ used.
+ }
+ ],
+ "git": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "repo_clone_url": "str" #
+ Optional. The clone URL of the repo. Example:
+ ``https://github.com/digitalocean/sample-golang.git``.
+ },
+ "github": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "deploy_on_push": bool, #
+ Optional. Whether to automatically deploy new commits
+ made to the repo.
+ "repo": "str" # Optional.
+ The name of the repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
+ },
+ "gitlab": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "deploy_on_push": bool, #
+ Optional. Whether to automatically deploy new commits
+ made to the repo.
+ "repo": "str" # Optional.
+ The name of the repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
+ },
+ "log_destinations": [
+ {
+ "name": "str", #
+ Required.
+ "datadog": {
+ "api_key":
+ "str", # Datadog API key. Required.
+ "endpoint":
+ "str" # Optional. Datadog HTTP log intake
+ endpoint.
+ },
+ "logtail": {
+ "token":
+ "str" # Optional. Logtail token.
+ },
+ "open_search": {
+ "basic_auth":
+ {
+ "password": "str", # Optional. Password for
+ user defined in User. Is required when
+ ``endpoint`` is set. Cannot be set if using a
+ DigitalOcean DBaaS OpenSearch cluster.
+ "user": "str" # Optional. Username to
+ authenticate with. Only required when
+ ``endpoint`` is set. Defaults to ``doadmin``
+ when ``cluster_name`` is set.
+ },
+ "cluster_name": "str", # Optional. The name of a
+ DigitalOcean DBaaS OpenSearch cluster to use as a
+ log forwarding destination. Cannot be specified
+ if ``endpoint`` is also specified.
+ "endpoint":
+ "str", # Optional. OpenSearch API Endpoint. Only
+ HTTPS is supported. Format:
+ https://:code:``::code:``. Cannot be
+ specified if ``cluster_name`` is also specified.
+ "index_name":
+ "logs" # Optional. Default value is "logs". The
+ index name to use for the logs. If not set, the
+ default index name is "logs".
+ },
+ "papertrail": {
+ "endpoint":
+ "str" # Papertrail syslog endpoint. Required.
+ }
+ }
+ ],
+ "routes": [
+ {
+ "path": "str", #
+ Optional. (Deprecated - Use Ingress Rules instead).
+ An HTTP path prefix. Paths must start with / and must
+ be unique across all components within an app.
+ "preserve_path_prefix": bool # Optional. An optional
+ flag to preserve the path that is forwarded to the
+ backend service. By default, the HTTP request path
+ will be trimmed from the left when forwarded to the
+ component. For example, a component with
+ ``path=/api`` will have requests to ``/api/list``
+ trimmed to ``/list``. If this value is ``true``"" ,
+ the path will remain ``/api/list``.
+ }
+ ],
+ "source_dir": "str" # Optional. An
+ optional path to the working directory to use for the build.
+ For Dockerfile builds, this will be used as the build
+ context. Must be relative to the root of the repo.
+ }
+ ],
+ "ingress": {
+ "rules": [
+ {
+ "component": {
+ "name": "str", # The
+ name of the component to route to. Required.
+ "preserve_path_prefix": "str", # Optional. An
+ optional flag to preserve the path that is forwarded
+ to the backend service. By default, the HTTP request
+ path will be trimmed from the left when forwarded to
+ the component. For example, a component with
+ ``path=/api`` will have requests to ``/api/list``
+ trimmed to ``/list``. If this value is ``true``"" ,
+ the path will remain ``/api/list``. Note: this is not
+ applicable for Functions Components and is mutually
+ exclusive with ``rewrite``.
+ "rewrite": "str" #
+ Optional. An optional field that will rewrite the
+ path of the component to be what is specified here.
+ By default, the HTTP request path will be trimmed
+ from the left when forwarded to the component. For
+ example, a component with ``path=/api`` will have
+ requests to ``/api/list`` trimmed to ``/list``. If
+ you specified the rewrite to be ``/v1/``"" , requests
+ to ``/api/list`` would be rewritten to ``/v1/list``.
+ Note: this is mutually exclusive with
+ ``preserve_path_prefix``.
+ },
+ "cors": {
+ "allow_credentials":
+ bool, # Optional. Whether browsers should expose the
+ response to the client-side JavaScript code when the
+ request"u2019s credentials mode is include. This
+ configures the ``Access-Control-Allow-Credentials``
+ header.
+ "allow_headers": [
+ "str" #
+ Optional. The set of allowed HTTP request
+ headers. This configures the
+ ``Access-Control-Allow-Headers`` header.
+ ],
+ "allow_methods": [
+ "str" #
+ Optional. The set of allowed HTTP methods. This
+ configures the ``Access-Control-Allow-Methods``
+ header.
+ ],
+ "allow_origins": [
+ {
+ "exact": "str", # Optional. Exact string
+ match. Only 1 of ``exact``"" , ``prefix``"" ,
+ or ``regex`` must be set.
+ "prefix": "str", # Optional. Prefix-based
+ match. Only 1 of ``exact``"" , ``prefix``"" ,
+ or ``regex`` must be set.
+ "regex": "str" # Optional. RE2 style
+ regex-based match. Only 1 of ``exact``"" ,
+ ``prefix``"" , or ``regex`` must be set. For
+ more information about RE2 syntax, see:
+ https://github.com/google/re2/wiki/Syntax.
+ }
+ ],
+ "expose_headers": [
+ "str" #
+ Optional. The set of HTTP response headers that
+ browsers are allowed to access. This configures
+ the ``Access-Control-Expose-Headers`` header.
+ ],
+ "max_age": "str" #
+ Optional. An optional duration specifying how long
+ browsers can cache the results of a preflight
+ request. This configures the
+ ``Access-Control-Max-Age`` header.
+ },
+ "match": {
+ "authority": {
+ "exact":
+ "str" # Required.
+ },
+ "path": {
+ "prefix":
+ "str" # Prefix-based match. For example,
+ ``/api`` will match ``/api``"" , ``/api/``"" ,
+ and any nested paths such as
+ ``/api/v1/endpoint``. Required.
+ }
+ },
+ "redirect": {
+ "authority": "str",
+ # Optional. The authority/host to redirect to. This
+ can be a hostname or IP address. Note: use ``port``
+ to set the port.
+ "port": 0, #
+ Optional. The port to redirect to.
+ "redirect_code": 0,
+ # Optional. The redirect code to use. Defaults to
+ ``302``. Supported values are 300, 301, 302, 303,
+ 304, 307, 308.
+ "scheme": "str", #
+ Optional. The scheme to redirect to. Supported values
+ are ``http`` or ``https``. Default: ``https``.
+ "uri": "str" #
+ Optional. An optional URI path to redirect to. Note:
+ if this is specified the whole URI of the original
+ request will be overwritten to this value,
+ irrespective of the original request URI being
+ matched.
+ }
+ }
+ ]
+ },
+ "jobs": [
+ {
+ "autoscaling": {
+ "max_instance_count": 0, #
+ Optional. The maximum amount of instances for this
+ component. Must be more than min_instance_count.
+ "metrics": {
+ "cpu": {
+ "percent": 80
+ # Optional. Default value is 80. The average
+ target CPU utilization for the component.
+ }
+ },
+ "min_instance_count": 0 #
+ Optional. The minimum amount of instances for this
+ component. Must be less than max_instance_count.
+ },
+ "bitbucket": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "deploy_on_push": bool, #
+ Optional. Whether to automatically deploy new commits
+ made to the repo.
+ "repo": "str" # Optional.
+ The name of the repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
+ },
+ "build_command": "str", # Optional.
+ An optional build command to run while building this
+ component from source.
+ "dockerfile_path": "str", #
+ Optional. The path to the Dockerfile relative to the root of
+ the repo. If set, it will be used to build this component.
+ Otherwise, App Platform will attempt to build it using
+ buildpacks.
+ "environment_slug": "str", #
+ Optional. An environment slug describing the type of this
+ app. For a full list, please refer to `the product
+ documentation
+ `_.
+ "envs": [
+ {
+ "key": "str", # The
+ variable name. Required.
+ "scope":
+ "RUN_AND_BUILD_TIME", # Optional. Default value is
+ "RUN_AND_BUILD_TIME". * RUN_TIME: Made available only
+ at run-time * BUILD_TIME: Made available only at
+ build-time * RUN_AND_BUILD_TIME: Made available at
+ both build and run-time. Known values are: "UNSET",
+ "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME".
+ "type": "GENERAL", #
+ Optional. Default value is "GENERAL". * GENERAL: A
+ plain-text environment variable * SECRET: A secret
+ encrypted environment variable. Known values are:
+ "GENERAL" and "SECRET".
+ "value": "str" #
+ Optional. The value. If the type is ``SECRET``"" ,
+ the value will be encrypted on first submission. On
+ following submissions, the encrypted value should be
+ used.
+ }
+ ],
+ "git": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "repo_clone_url": "str" #
+ Optional. The clone URL of the repo. Example:
+ ``https://github.com/digitalocean/sample-golang.git``.
+ },
+ "github": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "deploy_on_push": bool, #
+ Optional. Whether to automatically deploy new commits
+ made to the repo.
+ "repo": "str" # Optional.
+ The name of the repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
+ },
+ "gitlab": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "deploy_on_push": bool, #
+ Optional. Whether to automatically deploy new commits
+ made to the repo.
+ "repo": "str" # Optional.
+ The name of the repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
+ },
+ "image": {
+ "deploy_on_push": {
+ "enabled": bool #
+ Optional. Whether to automatically deploy new images.
+ Can only be used for images hosted in DOCR and can
+ only be used with an image tag, not a specific
+ digest.
+ },
+ "digest": "str", # Optional.
+ The image digest. Cannot be specified if tag is provided.
+ "registry": "str", #
+ Optional. The registry name. Must be left empty for the
+ ``DOCR`` registry type.
+ "registry_credentials":
+ "str", # Optional. The credentials to be able to pull
+ the image. The value will be encrypted on first
+ submission. On following submissions, the encrypted value
+ should be used. * "$username:$access_token" for
+ registries of type ``DOCKER_HUB``. *
+ "$username:$access_token" for registries of type
+ ``GHCR``.
+ "registry_type": "str", #
+ Optional. * DOCKER_HUB: The DockerHub container registry
+ type. * DOCR: The DigitalOcean container registry type. *
+ GHCR: The Github container registry type. Known values
+ are: "DOCKER_HUB", "DOCR", and "GHCR".
+ "repository": "str", #
+ Optional. The repository name.
+ "tag": "latest" # Optional.
+ Default value is "latest". The repository tag. Defaults
+ to ``latest`` if not provided and no digest is provided.
+ Cannot be specified if digest is provided.
+ },
+ "instance_count": 1, # Optional.
+ Default value is 1. The amount of instances that this
+ component should be scaled to. Default: 1. Must not be set if
+ autoscaling is used.
+ "instance_size_slug": {},
+ "kind": "UNSPECIFIED", # Optional.
+ Default value is "UNSPECIFIED". * UNSPECIFIED: Default job
+ type, will auto-complete to POST_DEPLOY kind. * PRE_DEPLOY:
+ Indicates a job that runs before an app deployment. *
+ POST_DEPLOY: Indicates a job that runs after an app
+ deployment. * FAILED_DEPLOY: Indicates a job that runs after
+ a component fails to deploy. Known values are: "UNSPECIFIED",
+ "PRE_DEPLOY", "POST_DEPLOY", and "FAILED_DEPLOY".
+ "log_destinations": [
+ {
+ "name": "str", #
+ Required.
+ "datadog": {
+ "api_key":
+ "str", # Datadog API key. Required.
+ "endpoint":
+ "str" # Optional. Datadog HTTP log intake
+ endpoint.
+ },
+ "logtail": {
+ "token":
+ "str" # Optional. Logtail token.
+ },
+ "open_search": {
+ "basic_auth":
+ {
+ "password": "str", # Optional. Password for
+ user defined in User. Is required when
+ ``endpoint`` is set. Cannot be set if using a
+ DigitalOcean DBaaS OpenSearch cluster.
+ "user": "str" # Optional. Username to
+ authenticate with. Only required when
+ ``endpoint`` is set. Defaults to ``doadmin``
+ when ``cluster_name`` is set.
+ },
+ "cluster_name": "str", # Optional. The name of a
+ DigitalOcean DBaaS OpenSearch cluster to use as a
+ log forwarding destination. Cannot be specified
+ if ``endpoint`` is also specified.
+ "endpoint":
+ "str", # Optional. OpenSearch API Endpoint. Only
+ HTTPS is supported. Format:
+ https://:code:``::code:``. Cannot be
+ specified if ``cluster_name`` is also specified.
+ "index_name":
+ "logs" # Optional. Default value is "logs". The
+ index name to use for the logs. If not set, the
+ default index name is "logs".
+ },
+ "papertrail": {
+ "endpoint":
+ "str" # Papertrail syslog endpoint. Required.
+ }
+ }
+ ],
+ "name": "str", # Optional. The name.
+ Must be unique across all components within the same app.
+ "run_command": "str", # Optional. An
+ optional run command to override the component's default.
+ "source_dir": "str", # Optional. An
+ optional path to the working directory to use for the build.
+ For Dockerfile builds, this will be used as the build
+ context. Must be relative to the root of the repo.
+ "termination": {
+ "grace_period_seconds": 0 #
+ Optional. The number of seconds to wait between sending a
+ TERM signal to a container and issuing a KILL which
+ causes immediate shutdown. (Default 120).
+ }
+ }
+ ],
+ "maintenance": {
+ "archive": bool, # Optional. Indicates
+ whether the app should be archived. Setting this to true implies
+ that enabled is set to true.
+ "enabled": bool, # Optional. Indicates
+ whether maintenance mode should be enabled for the app.
+ "offline_page_url": "str" # Optional. A
+ custom offline page to display when maintenance mode is enabled
+ or the app is archived.
+ },
+ "region": "str", # Optional. The slug form of the
+ geographical origin of the app. Default: ``nearest available``. Known
+ values are: "atl", "nyc", "sfo", "tor", "ams", "fra", "lon", "blr",
+ "sgp", and "syd".
+ "services": [
+ {
+ "autoscaling": {
+ "max_instance_count": 0, #
+ Optional. The maximum amount of instances for this
+ component. Must be more than min_instance_count.
+ "metrics": {
+ "cpu": {
+ "percent": 80
+ # Optional. Default value is 80. The average
+ target CPU utilization for the component.
+ }
+ },
+ "min_instance_count": 0 #
+ Optional. The minimum amount of instances for this
+ component. Must be less than max_instance_count.
+ },
+ "bitbucket": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "deploy_on_push": bool, #
+ Optional. Whether to automatically deploy new commits
+ made to the repo.
+ "repo": "str" # Optional.
+ The name of the repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
+ },
+ "build_command": "str", # Optional.
+ An optional build command to run while building this
+ component from source.
+ "cors": {
+ "allow_credentials": bool, #
+ Optional. Whether browsers should expose the response to
+ the client-side JavaScript code when the request"u2019s
+ credentials mode is include. This configures the
+ ``Access-Control-Allow-Credentials`` header.
+ "allow_headers": [
+ "str" # Optional.
+ The set of allowed HTTP request headers. This
+ configures the ``Access-Control-Allow-Headers``
+ header.
+ ],
+ "allow_methods": [
+ "str" # Optional.
+ The set of allowed HTTP methods. This configures the
+ ``Access-Control-Allow-Methods`` header.
+ ],
+ "allow_origins": [
+ {
+ "exact":
+ "str", # Optional. Exact string match. Only 1 of
+ ``exact``"" , ``prefix``"" , or ``regex`` must be
+ set.
+ "prefix":
+ "str", # Optional. Prefix-based match. Only 1 of
+ ``exact``"" , ``prefix``"" , or ``regex`` must be
+ set.
+ "regex":
+ "str" # Optional. RE2 style regex-based match.
+ Only 1 of ``exact``"" , ``prefix``"" , or
+ ``regex`` must be set. For more information about
+ RE2 syntax, see:
+ https://github.com/google/re2/wiki/Syntax.
+ }
+ ],
+ "expose_headers": [
+ "str" # Optional.
+ The set of HTTP response headers that browsers are
+ allowed to access. This configures the
+ ``Access-Control-Expose-Headers`` header.
+ ],
+ "max_age": "str" # Optional.
+ An optional duration specifying how long browsers can
+ cache the results of a preflight request. This configures
+ the ``Access-Control-Max-Age`` header.
+ },
+ "dockerfile_path": "str", #
+ Optional. The path to the Dockerfile relative to the root of
+ the repo. If set, it will be used to build this component.
+ Otherwise, App Platform will attempt to build it using
+ buildpacks.
+ "environment_slug": "str", #
+ Optional. An environment slug describing the type of this
+ app. For a full list, please refer to `the product
+ documentation
+ `_.
+ "envs": [
+ {
+ "key": "str", # The
+ variable name. Required.
+ "scope":
+ "RUN_AND_BUILD_TIME", # Optional. Default value is
+ "RUN_AND_BUILD_TIME". * RUN_TIME: Made available only
+ at run-time * BUILD_TIME: Made available only at
+ build-time * RUN_AND_BUILD_TIME: Made available at
+ both build and run-time. Known values are: "UNSET",
+ "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME".
+ "type": "GENERAL", #
+ Optional. Default value is "GENERAL". * GENERAL: A
+ plain-text environment variable * SECRET: A secret
+ encrypted environment variable. Known values are:
+ "GENERAL" and "SECRET".
+ "value": "str" #
+ Optional. The value. If the type is ``SECRET``"" ,
+ the value will be encrypted on first submission. On
+ following submissions, the encrypted value should be
+ used.
+ }
+ ],
+ "git": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "repo_clone_url": "str" #
+ Optional. The clone URL of the repo. Example:
+ ``https://github.com/digitalocean/sample-golang.git``.
+ },
+ "github": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "deploy_on_push": bool, #
+ Optional. Whether to automatically deploy new commits
+ made to the repo.
+ "repo": "str" # Optional.
+ The name of the repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
+ },
+ "gitlab": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "deploy_on_push": bool, #
+ Optional. Whether to automatically deploy new commits
+ made to the repo.
+ "repo": "str" # Optional.
+ The name of the repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
+ },
+ "health_check": {
+ "failure_threshold": 0, #
+ Optional. The number of failed health checks before
+ considered unhealthy.
+ "http_path": "str", #
+ Optional. The route path used for the HTTP health check
+ ping. If not set, the HTTP health check will be disabled
+ and a TCP health check used instead.
+ "initial_delay_seconds": 0,
+ # Optional. The number of seconds to wait before
+ beginning health checks.
+ "period_seconds": 0, #
+ Optional. The number of seconds to wait between health
+ checks.
+ "port": 0, # Optional. The
+ port on which the health check will be performed. If not
+ set, the health check will be performed on the
+ component's http_port.
+ "success_threshold": 0, #
+ Optional. The number of successful health checks before
+ considered healthy.
+ "timeout_seconds": 0 #
+ Optional. The number of seconds after which the check
+ times out.
+ },
+ "http_port": 0, # Optional. The
+ internal port on which this service's run command will
+ listen. Default: 8080 If there is not an environment variable
+ with the name ``PORT``"" , one will be automatically added
+ with its value set to the value of this field.
+ "image": {
+ "deploy_on_push": {
+ "enabled": bool #
+ Optional. Whether to automatically deploy new images.
+ Can only be used for images hosted in DOCR and can
+ only be used with an image tag, not a specific
+ digest.
+ },
+ "digest": "str", # Optional.
+ The image digest. Cannot be specified if tag is provided.
+ "registry": "str", #
+ Optional. The registry name. Must be left empty for the
+ ``DOCR`` registry type.
+ "registry_credentials":
+ "str", # Optional. The credentials to be able to pull
+ the image. The value will be encrypted on first
+ submission. On following submissions, the encrypted value
+ should be used. * "$username:$access_token" for
+ registries of type ``DOCKER_HUB``. *
+ "$username:$access_token" for registries of type
+ ``GHCR``.
+ "registry_type": "str", #
+ Optional. * DOCKER_HUB: The DockerHub container registry
+ type. * DOCR: The DigitalOcean container registry type. *
+ GHCR: The Github container registry type. Known values
+ are: "DOCKER_HUB", "DOCR", and "GHCR".
+ "repository": "str", #
+ Optional. The repository name.
+ "tag": "latest" # Optional.
+ Default value is "latest". The repository tag. Defaults
+ to ``latest`` if not provided and no digest is provided.
+ Cannot be specified if digest is provided.
+ },
+ "instance_count": 1, # Optional.
+ Default value is 1. The amount of instances that this
+ component should be scaled to. Default: 1. Must not be set if
+ autoscaling is used.
+ "instance_size_slug": {},
+ "internal_ports": [
+ 0 # Optional. The ports on
+ which this service will listen for internal traffic.
+ ],
+ "liveness_health_check": {
+ "failure_threshold": 0, #
+ Optional. The number of failed health checks before
+ considered unhealthy.
+ "http_path": "str", #
+ Optional. The route path used for the HTTP health check
+ ping. If not set, the HTTP health check will be disabled
+ and a TCP health check used instead.
+ "initial_delay_seconds": 0,
+ # Optional. The number of seconds to wait before
+ beginning health checks.
+ "period_seconds": 0, #
+ Optional. The number of seconds to wait between health
+ checks.
+ "port": 0, # Optional. The
+ port on which the health check will be performed.
+ "success_threshold": 0, #
+ Optional. The number of successful health checks before
+ considered healthy.
+ "timeout_seconds": 0 #
+ Optional. The number of seconds after which the check
+ times out.
+ },
+ "log_destinations": [
+ {
+ "name": "str", #
+ Required.
+ "datadog": {
+ "api_key":
+ "str", # Datadog API key. Required.
+ "endpoint":
+ "str" # Optional. Datadog HTTP log intake
+ endpoint.
+ },
+ "logtail": {
+ "token":
+ "str" # Optional. Logtail token.
+ },
+ "open_search": {
+ "basic_auth":
+ {
+ "password": "str", # Optional. Password for
+ user defined in User. Is required when
+ ``endpoint`` is set. Cannot be set if using a
+ DigitalOcean DBaaS OpenSearch cluster.
+ "user": "str" # Optional. Username to
+ authenticate with. Only required when
+ ``endpoint`` is set. Defaults to ``doadmin``
+ when ``cluster_name`` is set.
+ },
+ "cluster_name": "str", # Optional. The name of a
+ DigitalOcean DBaaS OpenSearch cluster to use as a
+ log forwarding destination. Cannot be specified
+ if ``endpoint`` is also specified.
+ "endpoint":
+ "str", # Optional. OpenSearch API Endpoint. Only
+ HTTPS is supported. Format:
+ ``https://<host>:<port>``. Cannot be
+ specified if ``cluster_name`` is also specified.
+ "index_name":
+ "logs" # Optional. Default value is "logs". The
+ index name to use for the logs. If not set, the
+ default index name is "logs".
+ },
+ "papertrail": {
+ "endpoint":
+ "str" # Papertrail syslog endpoint. Required.
+ }
+ }
+ ],
+ "name": "str", # Optional. The name.
+ Must be unique across all components within the same app.
+ "protocol": "str", # Optional. The
+ protocol which the service uses to serve traffic on the
+ http_port. * ``HTTP``: The app is serving the HTTP
+ protocol. Default. * ``HTTP2``: The app is serving the
+ HTTP/2 protocol. Currently, this needs to be implemented in
+ the service by serving HTTP/2 cleartext (h2c). Known values
+ are: "HTTP" and "HTTP2".
+ "routes": [
+ {
+ "path": "str", #
+ Optional. (Deprecated - Use Ingress Rules instead).
+ An HTTP path prefix. Paths must start with / and must
+ be unique across all components within an app.
+ "preserve_path_prefix": bool # Optional. An optional
+ flag to preserve the path that is forwarded to the
+ backend service. By default, the HTTP request path
+ will be trimmed from the left when forwarded to the
+ component. For example, a component with
+ ``path=/api`` will have requests to ``/api/list``
+ trimmed to ``/list``. If this value is ``true``,
+ the path will remain ``/api/list``.
+ }
+ ],
+ "run_command": "str", # Optional. An
+ optional run command to override the component's default.
+ "source_dir": "str", # Optional. An
+ optional path to the working directory to use for the build.
+ For Dockerfile builds, this will be used as the build
+ context. Must be relative to the root of the repo.
+ "termination": {
+ "drain_seconds": 0, #
+ Optional. The number of seconds to wait between selecting
+ a container instance for termination and issuing the TERM
+ signal. Selecting a container instance for termination
+ begins an asynchronous drain of new requests on upstream
+ load-balancers. (Default 15).
+ "grace_period_seconds": 0 #
+ Optional. The number of seconds to wait between sending a
+ TERM signal to a container and issuing a KILL which
+ causes immediate shutdown. (Default 120).
+ }
+ }
+ ],
+ "static_sites": [
+ {
+ "bitbucket": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "deploy_on_push": bool, #
+ Optional. Whether to automatically deploy new commits
+ made to the repo.
+ "repo": "str" # Optional.
+ The name of the repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
+ },
+ "build_command": "str", # Optional.
+ An optional build command to run while building this
+ component from source.
+ "catchall_document": "str", #
+ Optional. The name of the document to use as the fallback for
+ any requests to documents that are not found when serving
+ this static site. Only 1 of ``catchall_document`` or
+ ``error_document`` can be set.
+ "cors": {
+ "allow_credentials": bool, #
+ Optional. Whether browsers should expose the response to
+ the client-side JavaScript code when the request's
+ credentials mode is include. This configures the
+ ``Access-Control-Allow-Credentials`` header.
+ "allow_headers": [
+ "str" # Optional.
+ The set of allowed HTTP request headers. This
+ configures the ``Access-Control-Allow-Headers``
+ header.
+ ],
+ "allow_methods": [
+ "str" # Optional.
+ The set of allowed HTTP methods. This configures the
+ ``Access-Control-Allow-Methods`` header.
+ ],
+ "allow_origins": [
+ {
+ "exact":
+ "str", # Optional. Exact string match. Only 1 of
+ ``exact``, ``prefix``, or ``regex`` must be
+ set.
+ "prefix":
+ "str", # Optional. Prefix-based match. Only 1 of
+ ``exact``, ``prefix``, or ``regex`` must be
+ set.
+ "regex":
+ "str" # Optional. RE2 style regex-based match.
+ Only 1 of ``exact``, ``prefix``, or
+ ``regex`` must be set. For more information about
+ RE2 syntax, see:
+ https://github.com/google/re2/wiki/Syntax.
+ }
+ ],
+ "expose_headers": [
+ "str" # Optional.
+ The set of HTTP response headers that browsers are
+ allowed to access. This configures the
+ ``Access-Control-Expose-Headers`` header.
+ ],
+ "max_age": "str" # Optional.
+ An optional duration specifying how long browsers can
+ cache the results of a preflight request. This configures
+ the ``Access-Control-Max-Age`` header.
+ },
+ "dockerfile_path": "str", #
+ Optional. The path to the Dockerfile relative to the root of
+ the repo. If set, it will be used to build this component.
+ Otherwise, App Platform will attempt to build it using
+ buildpacks.
+ "environment_slug": "str", #
+ Optional. An environment slug describing the type of this
+ app. For a full list, please refer to `the product
+ documentation
+ `_.
+ "envs": [
+ {
+ "key": "str", # The
+ variable name. Required.
+ "scope":
+ "RUN_AND_BUILD_TIME", # Optional. Default value is
+ "RUN_AND_BUILD_TIME". * RUN_TIME: Made available only
+ at run-time * BUILD_TIME: Made available only at
+ build-time * RUN_AND_BUILD_TIME: Made available at
+ both build and run-time. Known values are: "UNSET",
+ "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME".
+ "type": "GENERAL", #
+ Optional. Default value is "GENERAL". * GENERAL: A
+ plain-text environment variable * SECRET: A secret
+ encrypted environment variable. Known values are:
+ "GENERAL" and "SECRET".
+ "value": "str" #
+ Optional. The value. If the type is ``SECRET``,
+ the value will be encrypted on first submission. On
+ following submissions, the encrypted value should be
+ used.
+ }
+ ],
+ "error_document": "404.html", #
+ Optional. Default value is "404.html". The name of the error
+ document to use when serving this static site. Default:
+ 404.html. If no such file exists within the built assets, App
+ Platform will supply one.
+ "git": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "repo_clone_url": "str" #
+ Optional. The clone URL of the repo. Example:
+ ``https://github.com/digitalocean/sample-golang.git``.
+ },
+ "github": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "deploy_on_push": bool, #
+ Optional. Whether to automatically deploy new commits
+ made to the repo.
+ "repo": "str" # Optional.
+ The name of the repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
+ },
+ "gitlab": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "deploy_on_push": bool, #
+ Optional. Whether to automatically deploy new commits
+ made to the repo.
+ "repo": "str" # Optional.
+ The name of the repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
+ },
+ "image": {
+ "deploy_on_push": {
+ "enabled": bool #
+ Optional. Whether to automatically deploy new images.
+ Can only be used for images hosted in DOCR and can
+ only be used with an image tag, not a specific
+ digest.
+ },
+ "digest": "str", # Optional.
+ The image digest. Cannot be specified if tag is provided.
+ "registry": "str", #
+ Optional. The registry name. Must be left empty for the
+ ``DOCR`` registry type.
+ "registry_credentials":
+ "str", # Optional. The credentials to be able to pull
+ the image. The value will be encrypted on first
+ submission. On following submissions, the encrypted value
+ should be used. * "$username:$access_token" for
+ registries of type ``DOCKER_HUB``. *
+ "$username:$access_token" for registries of type
+ ``GHCR``.
+ "registry_type": "str", #
+ Optional. * DOCKER_HUB: The DockerHub container registry
+ type. * DOCR: The DigitalOcean container registry type. *
+ GHCR: The Github container registry type. Known values
+ are: "DOCKER_HUB", "DOCR", and "GHCR".
+ "repository": "str", #
+ Optional. The repository name.
+ "tag": "latest" # Optional.
+ Default value is "latest". The repository tag. Defaults
+ to ``latest`` if not provided and no digest is provided.
+ Cannot be specified if digest is provided.
+ },
+ "index_document": "index.html", #
+ Optional. Default value is "index.html". The name of the
+ index document to use when serving this static site. Default:
+ index.html.
+ "log_destinations": [
+ {
+ "name": "str", #
+ Required.
+ "datadog": {
+ "api_key":
+ "str", # Datadog API key. Required.
+ "endpoint":
+ "str" # Optional. Datadog HTTP log intake
+ endpoint.
+ },
+ "logtail": {
+ "token":
+ "str" # Optional. Logtail token.
+ },
+ "open_search": {
+ "basic_auth":
+ {
+ "password": "str", # Optional. Password for
+ user defined in User. Is required when
+ ``endpoint`` is set. Cannot be set if using a
+ DigitalOcean DBaaS OpenSearch cluster.
+ "user": "str" # Optional. Username to
+ authenticate with. Only required when
+ ``endpoint`` is set. Defaults to ``doadmin``
+ when ``cluster_name`` is set.
+ },
+ "cluster_name": "str", # Optional. The name of a
+ DigitalOcean DBaaS OpenSearch cluster to use as a
+ log forwarding destination. Cannot be specified
+ if ``endpoint`` is also specified.
+ "endpoint":
+ "str", # Optional. OpenSearch API Endpoint. Only
+ HTTPS is supported. Format:
+ ``https://<host>:<port>``. Cannot be
+ specified if ``cluster_name`` is also specified.
+ "index_name":
+ "logs" # Optional. Default value is "logs". The
+ index name to use for the logs. If not set, the
+ default index name is "logs".
+ },
+ "papertrail": {
+ "endpoint":
+ "str" # Papertrail syslog endpoint. Required.
+ }
+ }
+ ],
+ "name": "str", # Optional. The name.
+ Must be unique across all components within the same app.
+ "output_dir": "str", # Optional. An
+ optional path to where the built assets will be located,
+ relative to the build context. If not set, App Platform will
+ automatically scan for these directory names: ``_static``,
+ ``dist``, ``public``, ``build``.
+ "routes": [
+ {
+ "path": "str", #
+ Optional. (Deprecated - Use Ingress Rules instead).
+ An HTTP path prefix. Paths must start with / and must
+ be unique across all components within an app.
+ "preserve_path_prefix": bool # Optional. An optional
+ flag to preserve the path that is forwarded to the
+ backend service. By default, the HTTP request path
+ will be trimmed from the left when forwarded to the
+ component. For example, a component with
+ ``path=/api`` will have requests to ``/api/list``
+ trimmed to ``/list``. If this value is ``true``,
+ the path will remain ``/api/list``.
+ }
+ ],
+ "run_command": "str", # Optional. An
+ optional run command to override the component's default.
+ "source_dir": "str" # Optional. An
+ optional path to the working directory to use for the build.
+ For Dockerfile builds, this will be used as the build
+ context. Must be relative to the root of the repo.
+ }
+ ],
+ "vpc": {
+ "egress_ips": [
+ {
+ "ip": "str" # Optional. The
+ egress ips associated with the VPC.
+ }
+ ],
+ "id": "str" # Optional. The ID of the VPC.
+ },
+ "workers": [
+ {
+ "autoscaling": {
+ "max_instance_count": 0, #
+ Optional. The maximum amount of instances for this
+ component. Must be more than min_instance_count.
+ "metrics": {
+ "cpu": {
+ "percent": 80
+ # Optional. Default value is 80. The average
+ target CPU utilization for the component.
+ }
+ },
+ "min_instance_count": 0 #
+ Optional. The minimum amount of instances for this
+ component. Must be less than max_instance_count.
+ },
+ "bitbucket": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "deploy_on_push": bool, #
+ Optional. Whether to automatically deploy new commits
+ made to the repo.
+ "repo": "str" # Optional.
+ The name of the repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
+ },
+ "build_command": "str", # Optional.
+ An optional build command to run while building this
+ component from source.
+ "dockerfile_path": "str", #
+ Optional. The path to the Dockerfile relative to the root of
+ the repo. If set, it will be used to build this component.
+ Otherwise, App Platform will attempt to build it using
+ buildpacks.
+ "environment_slug": "str", #
+ Optional. An environment slug describing the type of this
+ app. For a full list, please refer to `the product
+ documentation
+ `_.
+ "envs": [
+ {
+ "key": "str", # The
+ variable name. Required.
+ "scope":
+ "RUN_AND_BUILD_TIME", # Optional. Default value is
+ "RUN_AND_BUILD_TIME". * RUN_TIME: Made available only
+ at run-time * BUILD_TIME: Made available only at
+ build-time * RUN_AND_BUILD_TIME: Made available at
+ both build and run-time. Known values are: "UNSET",
+ "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME".
+ "type": "GENERAL", #
+ Optional. Default value is "GENERAL". * GENERAL: A
+ plain-text environment variable * SECRET: A secret
+ encrypted environment variable. Known values are:
+ "GENERAL" and "SECRET".
+ "value": "str" #
+ Optional. The value. If the type is ``SECRET``,
+ the value will be encrypted on first submission. On
+ following submissions, the encrypted value should be
+ used.
+ }
+ ],
+ "git": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "repo_clone_url": "str" #
+ Optional. The clone URL of the repo. Example:
+ ``https://github.com/digitalocean/sample-golang.git``.
+ },
+ "github": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "deploy_on_push": bool, #
+ Optional. Whether to automatically deploy new commits
+ made to the repo.
+ "repo": "str" # Optional.
+ The name of the repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
+ },
+ "gitlab": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "deploy_on_push": bool, #
+ Optional. Whether to automatically deploy new commits
+ made to the repo.
+ "repo": "str" # Optional.
+ The name of the repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
+ },
+ "image": {
+ "deploy_on_push": {
+ "enabled": bool #
+ Optional. Whether to automatically deploy new images.
+ Can only be used for images hosted in DOCR and can
+ only be used with an image tag, not a specific
+ digest.
+ },
+ "digest": "str", # Optional.
+ The image digest. Cannot be specified if tag is provided.
+ "registry": "str", #
+ Optional. The registry name. Must be left empty for the
+ ``DOCR`` registry type.
+ "registry_credentials":
+ "str", # Optional. The credentials to be able to pull
+ the image. The value will be encrypted on first
+ submission. On following submissions, the encrypted value
+ should be used. * "$username:$access_token" for
+ registries of type ``DOCKER_HUB``. *
+ "$username:$access_token" for registries of type
+ ``GHCR``.
+ "registry_type": "str", #
+ Optional. * DOCKER_HUB: The DockerHub container registry
+ type. * DOCR: The DigitalOcean container registry type. *
+ GHCR: The Github container registry type. Known values
+ are: "DOCKER_HUB", "DOCR", and "GHCR".
+ "repository": "str", #
+ Optional. The repository name.
+ "tag": "latest" # Optional.
+ Default value is "latest". The repository tag. Defaults
+ to ``latest`` if not provided and no digest is provided.
+ Cannot be specified if digest is provided.
+ },
+ "instance_count": 1, # Optional.
+ Default value is 1. The amount of instances that this
+ component should be scaled to. Default: 1. Must not be set if
+ autoscaling is used.
+ "instance_size_slug": {},
+ "liveness_health_check": {
+ "failure_threshold": 0, #
+ Optional. The number of failed health checks before
+ considered unhealthy.
+ "http_path": "str", #
+ Optional. The route path used for the HTTP health check
+ ping. If not set, the HTTP health check will be disabled
+ and a TCP health check used instead.
+ "initial_delay_seconds": 0,
+ # Optional. The number of seconds to wait before
+ beginning health checks.
+ "period_seconds": 0, #
+ Optional. The number of seconds to wait between health
+ checks.
+ "port": 0, # Optional. The
+ port on which the health check will be performed.
+ "success_threshold": 0, #
+ Optional. The number of successful health checks before
+ considered healthy.
+ "timeout_seconds": 0 #
+ Optional. The number of seconds after which the check
+ times out.
+ },
+ "log_destinations": [
+ {
+ "name": "str", #
+ Required.
+ "datadog": {
+ "api_key":
+ "str", # Datadog API key. Required.
+ "endpoint":
+ "str" # Optional. Datadog HTTP log intake
+ endpoint.
+ },
+ "logtail": {
+ "token":
+ "str" # Optional. Logtail token.
+ },
+ "open_search": {
+ "basic_auth":
+ {
+ "password": "str", # Optional. Password for
+ user defined in User. Is required when
+ ``endpoint`` is set. Cannot be set if using a
+ DigitalOcean DBaaS OpenSearch cluster.
+ "user": "str" # Optional. Username to
+ authenticate with. Only required when
+ ``endpoint`` is set. Defaults to ``doadmin``
+ when ``cluster_name`` is set.
+ },
+ "cluster_name": "str", # Optional. The name of a
+ DigitalOcean DBaaS OpenSearch cluster to use as a
+ log forwarding destination. Cannot be specified
+ if ``endpoint`` is also specified.
+ "endpoint":
+ "str", # Optional. OpenSearch API Endpoint. Only
+ HTTPS is supported. Format:
+ ``https://<host>:<port>``. Cannot be
+ specified if ``cluster_name`` is also specified.
+ "index_name":
+ "logs" # Optional. Default value is "logs". The
+ index name to use for the logs. If not set, the
+ default index name is "logs".
+ },
+ "papertrail": {
+ "endpoint":
+ "str" # Papertrail syslog endpoint. Required.
+ }
+ }
+ ],
+ "name": "str", # Optional. The name.
+ Must be unique across all components within the same app.
+ "run_command": "str", # Optional. An
+ optional run command to override the component's default.
+ "source_dir": "str", # Optional. An
+ optional path to the working directory to use for the build.
+ For Dockerfile builds, this will be used as the build
+ context. Must be relative to the root of the repo.
+ "termination": {
+ "grace_period_seconds": 0 #
+ Optional. The number of seconds to wait between sending a
+ TERM signal to a container and issuing a KILL which
+ causes immediate shutdown. (Default 120).
+ }
+ }
+ ]
+ },
+ "static_sites": [
+ {
+ "name": "str", # Optional. The name of this
+ static site.
+ "source_commit_hash": "str" # Optional. The
+ commit hash of the repository that was used to build this static
+ site.
+ }
+ ],
+ "tier_slug": "str", # Optional. The current pricing tier
+ slug of the deployment.
+ "updated_at": "2020-02-20 00:00:00", # Optional. When the
+ deployment was last updated.
+ "workers": [
+ {
+ "name": "str", # Optional. The name of this
+ worker.
+ "source_commit_hash": "str" # Optional. The
+ commit hash of the repository that was used to build this worker.
+ }
+ ]
+ },
+ "deployment_id": "str", # Optional. For deployment events, this is
+ the same as the deployment's ID. For autoscaling events, this is the
+ deployment that was autoscaled.
+ "id": "str", # Optional. The ID of the event (UUID).
+ "type": "str" # Optional. The type of event. Known values are:
+ "UNKNOWN", "DEPLOYMENT", and "AUTOSCALING".
+ }
+ }
+ # response body for status code(s): 404
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
+ """
+ error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ 401: cast(
+ Type[HttpResponseError],
+ lambda response: ClientAuthenticationError(response=response),
+ ),
+ 429: HttpResponseError,
+ 500: HttpResponseError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = kwargs.pop("params", {}) or {}
+
+ cls: ClsType[JSON] = kwargs.pop("cls", None)
+
+ _request = build_apps_cancel_event_request(
+ app_id=app_id,
+ event_id=event_id,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = (
+ await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 404]:
+ if _stream:
+ await response.read() # Load the body in memory and close the socket
+ map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
+ raise HttpResponseError(response=response)
+
+ response_headers = {}
+ if response.status_code == 200:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if response.status_code == 404:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if cls:
+ return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
+
+ return cast(JSON, deserialized) # type: ignore
+
+ @distributed_trace_async
+ async def get_event_logs(
+ self,
+ app_id: str,
+ event_id: str,
+ *,
+ follow: Optional[bool] = None,
+ type: str = "UNSPECIFIED",
+ pod_connection_timeout: Optional[str] = None,
+ **kwargs: Any
+ ) -> JSON:
+ # pylint: disable=line-too-long
+ """Retrieve Event Logs.
+
+ Retrieve the logs of an autoscaling event for an app.
+
+ :param app_id: The app ID. Required.
+ :type app_id: str
+ :param event_id: The event ID. Required.
+ :type event_id: str
+ :keyword follow: Whether the logs should follow live updates. Default value is None.
+ :paramtype follow: bool
+ :keyword type: The type of logs to retrieve
+
+
+ * BUILD: Build-time logs
+ * DEPLOY: Deploy-time logs
+ * RUN: Live run-time logs
+ * RUN_RESTARTED: Logs of crashed/restarted instances during runtime
+ * AUTOSCALE_EVENT: Logs of an autoscaling event (requires event_id). Known values are:
+ "UNSPECIFIED", "BUILD", "DEPLOY", "RUN", "RUN_RESTARTED", and "AUTOSCALE_EVENT". Default value
+ is "UNSPECIFIED".
+ :paramtype type: str
+ :keyword pod_connection_timeout: An optional time duration to wait if the underlying component
+ instance is not immediately available. Default: ``3m``. Default value is None.
+ :paramtype pod_connection_timeout: str
+ :return: JSON object
+ :rtype: JSON
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # response body for status code(s): 200
+ response == {
+ "historic_urls": [
+ "str" # Optional. A list of URLs to archived log files.
+ ],
+ "live_url": "str" # Optional. A URL of the real-time live logs. This URL may
+ use either the ``https://`` or ``wss://`` protocols and will keep pushing live
+ logs as they become available.
+ }
+ # response body for status code(s): 404
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
+ """
+ error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ 401: cast(
+ Type[HttpResponseError],
+ lambda response: ClientAuthenticationError(response=response),
+ ),
+ 429: HttpResponseError,
+ 500: HttpResponseError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = kwargs.pop("params", {}) or {}
+
+ cls: ClsType[JSON] = kwargs.pop("cls", None)
+
+ _request = build_apps_get_event_logs_request(
+ app_id=app_id,
+ event_id=event_id,
+ follow=follow,
+ type=type,
+ pod_connection_timeout=pod_connection_timeout,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = (
+ await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 404]:
+ if _stream:
+ await response.read() # Load the body in memory and close the socket
+ map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
+ raise HttpResponseError(response=response)
+
+ response_headers = {}
+ if response.status_code == 200:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if response.status_code == 404:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if cls:
+ return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
+
+ return cast(JSON, deserialized) # type: ignore
+
@distributed_trace_async
async def list_instance_sizes(self, **kwargs: Any) -> JSON:
# pylint: disable=line-too-long
@@ -104673,71 +110007,1256 @@ async def list_options(self, **kwargs: Any) -> JSON:
"version": "str" # Optional. The engine version.
}
],
- "mysql": [
+ "mysql": [
+ {
+ "end_of_availability": "str", # Optional. A
+ timestamp referring to the date when the particular version will no
+ longer be available for creating new clusters. If null, the version
+ does not have an end of availability timeline.
+ "end_of_life": "str", # Optional. A timestamp
+ referring to the date when the particular version will no longer be
+ supported. If null, the version does not have an end of life
+ timeline.
+ "version": "str" # Optional. The engine version.
+ }
+ ],
+ "opensearch": [
+ {
+ "end_of_availability": "str", # Optional. A
+ timestamp referring to the date when the particular version will no
+ longer be available for creating new clusters. If null, the version
+ does not have an end of availability timeline.
+ "end_of_life": "str", # Optional. A timestamp
+ referring to the date when the particular version will no longer be
+ supported. If null, the version does not have an end of life
+ timeline.
+ "version": "str" # Optional. The engine version.
+ }
+ ],
+ "pg": [
+ {
+ "end_of_availability": "str", # Optional. A
+ timestamp referring to the date when the particular version will no
+ longer be available for creating new clusters. If null, the version
+ does not have an end of availability timeline.
+ "end_of_life": "str", # Optional. A timestamp
+ referring to the date when the particular version will no longer be
+ supported. If null, the version does not have an end of life
+ timeline.
+ "version": "str" # Optional. The engine version.
+ }
+ ],
+ "redis": [
+ {
+ "end_of_availability": "str", # Optional. A
+ timestamp referring to the date when the particular version will no
+ longer be available for creating new clusters. If null, the version
+ does not have an end of availability timeline.
+ "end_of_life": "str", # Optional. A timestamp
+ referring to the date when the particular version will no longer be
+ supported. If null, the version does not have an end of life
+ timeline.
+ "version": "str" # Optional. The engine version.
+ }
+ ],
+ "valkey": [
+ {
+ "end_of_availability": "str", # Optional. A
+ timestamp referring to the date when the particular version will no
+ longer be available for creating new clusters. If null, the version
+ does not have an end of availability timeline.
+ "end_of_life": "str", # Optional. A timestamp
+ referring to the date when the particular version will no longer be
+ supported. If null, the version does not have an end of life
+ timeline.
+ "version": "str" # Optional. The engine version.
+ }
+ ]
+ }
+ }
+ # response body for status code(s): 404
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
+ """
+ error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ 401: cast(
+ Type[HttpResponseError],
+ lambda response: ClientAuthenticationError(response=response),
+ ),
+ 429: HttpResponseError,
+ 500: HttpResponseError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = kwargs.pop("params", {}) or {}
+
+ cls: ClsType[JSON] = kwargs.pop("cls", None)
+
+ _request = build_databases_list_options_request(
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = (
+ await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 404]:
+ if _stream:
+ await response.read() # Load the body in memory and close the socket
+ map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
+ raise HttpResponseError(response=response)
+
+ response_headers = {}
+ if response.status_code == 200:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if response.status_code == 404:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if cls:
+ return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
+
+ return cast(JSON, deserialized) # type: ignore
+
+ @distributed_trace_async
+ async def list_clusters(
+ self, *, tag_name: Optional[str] = None, **kwargs: Any
+ ) -> JSON:
+ # pylint: disable=line-too-long
+ """List All Database Clusters.
+
+ To list all of the database clusters available on your account, send a GET request to
+ ``/v2/databases``. To limit the results to database clusters with a specific tag, include the
+ ``tag_name`` query parameter set to the name of the tag. For example,
+ ``/v2/databases?tag_name=$TAG_NAME``.
+
+ The result will be a JSON object with a ``databases`` key. This will be set to an array of
+ database objects, each of which will contain the standard database attributes.
+
+ The embedded ``connection`` and ``private_connection`` objects will contain the information
+ needed to access the database cluster. For multi-node clusters, the ``standby_connection`` and
+ ``standby_private_connection`` objects will contain the information needed to connect to the
+ cluster's standby node(s).
+
+ The embedded ``maintenance_window`` object will contain information about any scheduled
+ maintenance for the database cluster.
+
+ :keyword tag_name: Limits the results to database clusters with a specific
+ tag.:code:`
`:code:`
`Requires ``tag:read`` scope. Default value is None.
+ :paramtype tag_name: str
+ :return: JSON object
+ :rtype: JSON
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # response body for status code(s): 200
+ response == {
+ "databases": [
+ {
+ "engine": "str", # A slug representing the database engine
+ used for the cluster. The possible values are: "pg" for PostgreSQL,
+ "mysql" for MySQL, "redis" for Caching, "mongodb" for MongoDB, "kafka"
+ for Kafka, "opensearch" for OpenSearch, and "valkey" for Valkey.
+ Required. Known values are: "pg", "mysql", "redis", "valkey", "mongodb",
+ "kafka", and "opensearch".
+ "name": "str", # A unique, human-readable name referring to
+ a database cluster. Required.
+ "num_nodes": 0, # The number of nodes in the database
+ cluster. Required.
+ "region": "str", # The slug identifier for the region where
+ the database cluster is located. Required.
+ "size": "str", # The slug identifier representing the size
+ of the nodes in the database cluster. Required.
+ "connection": {
+ "database": "str", # Optional. The name of the
+ default database.
+ "host": "str", # Optional. The FQDN pointing to the
+ database cluster's current primary node.
+ "password": "str", # Optional. The randomly
+ generated password for the default
+ user.:code:`
`:code:`
`Requires ``database:view_credentials``
+ scope.
+ "port": 0, # Optional. The port on which the
+ database cluster is listening.
+ "ssl": bool, # Optional. A boolean value indicating
+ if the connection should be made over SSL.
+ "uri": "str", # Optional. A connection string in the
+ format accepted by the ``psql`` command. This is provided as a
+ convenience and should be able to be constructed by the other
+ attributes.
+ "user": "str" # Optional. The default user for the
+ database.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ },
+ "created_at": "2020-02-20 00:00:00", # Optional. A time
+ value given in ISO8601 combined date and time format that represents when
+ the database cluster was created.
+ "db_names": [
+ "str" # Optional. An array of strings containing the
+ names of databases created in the database cluster.
+ ],
+ "do_settings": {
+ "service_cnames": [
+ "str" # Optional. An array of custom CNAMEs
+ for the database cluster. Each CNAME must be a valid RFC 1123
+ hostname (e.g., "db.example.com"). Maximum of 16 CNAMEs allowed,
+ each up to 253 characters.
+ ]
+ },
+ "id": "str", # Optional. A unique ID that can be used to
+ identify and reference a database cluster.
+ "maintenance_window": {
+ "day": "str", # The day of the week on which to
+ apply maintenance updates. Required.
+ "hour": "str", # The hour in UTC at which
+ maintenance updates will be applied in 24 hour format. Required.
+ "description": [
+ "str" # Optional. A list of strings, each
+ containing information about a pending maintenance update.
+ ],
+ "pending": bool # Optional. A boolean value
+ indicating whether any maintenance is scheduled to be performed in
+ the next window.
+ },
+ "metrics_endpoints": [
+ {
+ "host": "str", # Optional. A FQDN pointing
+ to the database cluster's node(s).
+ "port": 0 # Optional. The port on which a
+ service is listening.
+ }
+ ],
+ "private_connection": {
+ "database": "str", # Optional. The name of the
+ default database.
+ "host": "str", # Optional. The FQDN pointing to the
+ database cluster's current primary node.
+ "password": "str", # Optional. The randomly
+ generated password for the default
+ user.:code:`
`:code:`
`Requires ``database:view_credentials``
+ scope.
+ "port": 0, # Optional. The port on which the
+ database cluster is listening.
+ "ssl": bool, # Optional. A boolean value indicating
+ if the connection should be made over SSL.
+ "uri": "str", # Optional. A connection string in the
+ format accepted by the ``psql`` command. This is provided as a
+ convenience and should be able to be constructed by the other
+ attributes.
+ "user": "str" # Optional. The default user for the
+ database.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ },
+ "private_network_uuid": "str", # Optional. A string
+ specifying the UUID of the VPC to which the database cluster will be
+ assigned. If excluded, the cluster when creating a new database cluster,
+ it will be assigned to your account's default VPC for the region.
+ :code:`
`:code:`
`Requires ``vpc:read`` scope.
+ "project_id": "str", # Optional. The ID of the project that
+ the database cluster is assigned to. If excluded when creating a new
+ database cluster, it will be assigned to your default
+ project.:code:`
`:code:`
`Requires ``project:read`` scope.
+ "rules": [
+ {
+ "type": "str", # The type of resource that
+ the firewall rule allows to access the database cluster.
+ Required. Known values are: "droplet", "k8s", "ip_addr", "tag",
+ and "app".
+ "value": "str", # The ID of the specific
+ resource, the name of a tag applied to a group of resources, or
+ the IP address that the firewall rule allows to access the
+ database cluster. Required.
+ "cluster_uuid": "str", # Optional. A unique
+ ID for the database cluster to which the rule is applied.
+ "created_at": "2020-02-20 00:00:00", #
+ Optional. A time value given in ISO8601 combined date and time
+ format that represents when the firewall rule was created.
+ "description": "str", # Optional. A
+ human-readable description of the rule.
+ "uuid": "str" # Optional. A unique ID for
+ the firewall rule itself.
+ }
+ ],
+ "schema_registry_connection": {
+ "host": "str", # Optional. The FQDN pointing to the
+ schema registry connection uri.
+ "password": "str", # Optional. The randomly
+ generated password for the schema
+ registry.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ "port": 0, # Optional. The port on which the schema
+ registry is listening.
+ "ssl": bool, # Optional. A boolean value indicating
+ if the connection should be made over SSL.
+ "uri": "str", # Optional. This is provided as a
+ convenience and should be able to be constructed by the other
+ attributes.
+ "user": "str" # Optional. The default user for the
+ schema registry.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ },
+ "semantic_version": "str", # Optional. A string representing
+ the semantic version of the database engine in use for the cluster.
+ "standby_connection": {
+ "database": "str", # Optional. The name of the
+ default database.
+ "host": "str", # Optional. The FQDN pointing to the
+ database cluster's current primary node.
+ "password": "str", # Optional. The randomly
+ generated password for the default
+ user.:code:`
`:code:`
`Requires ``database:view_credentials``
+ scope.
+ "port": 0, # Optional. The port on which the
+ database cluster is listening.
+ "ssl": bool, # Optional. A boolean value indicating
+ if the connection should be made over SSL.
+ "uri": "str", # Optional. A connection string in the
+ format accepted by the ``psql`` command. This is provided as a
+ convenience and should be able to be constructed by the other
+ attributes.
+ "user": "str" # Optional. The default user for the
+ database.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ },
+ "standby_private_connection": {
+ "database": "str", # Optional. The name of the
+ default database.
+ "host": "str", # Optional. The FQDN pointing to the
+ database cluster's current primary node.
+ "password": "str", # Optional. The randomly
+ generated password for the default
+ user.:code:`
`:code:`
`Requires ``database:view_credentials``
+ scope.
+ "port": 0, # Optional. The port on which the
+ database cluster is listening.
+ "ssl": bool, # Optional. A boolean value indicating
+ if the connection should be made over SSL.
+ "uri": "str", # Optional. A connection string in the
+ format accepted by the ``psql`` command. This is provided as a
+ convenience and should be able to be constructed by the other
+ attributes.
+ "user": "str" # Optional. The default user for the
+ database.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ },
+ "status": "str", # Optional. A string representing the
+ current status of the database cluster. Known values are: "creating",
+ "online", "resizing", "migrating", and "forking".
+ "storage_size_mib": 0, # Optional. Additional storage added
+ to the cluster, in MiB. If null, no additional storage is added to the
+ cluster, beyond what is provided as a base amount from the 'size' and any
+ previously added additional storage.
+ "tags": [
+ "str" # Optional. An array of tags that have been
+ applied to the database cluster. :code:`
`:code:`
`Requires
+ ``tag:read`` scope.
+ ],
+ "ui_connection": {
+ "host": "str", # Optional. The FQDN pointing to the
+ opensearch cluster's current primary node.
+ "password": "str", # Optional. The randomly
+ generated password for the default
+ user.:code:`
`:code:`
`Requires ``database:view_credentials``
+ scope.
+ "port": 0, # Optional. The port on which the
+ opensearch dashboard is listening.
+ "ssl": bool, # Optional. A boolean value indicating
+ if the connection should be made over SSL.
+ "uri": "str", # Optional. This is provided as a
+ convenience and should be able to be constructed by the other
+ attributes.
+ "user": "str" # Optional. The default user for the
+ opensearch dashboard.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ },
+ "users": [
+ {
+ "name": "str", # The name of a database
+ user. Required.
+ "access_cert": "str", # Optional. Access
+ certificate for TLS client authentication. (Kafka only).
+ "access_key": "str", # Optional. Access key
+ for TLS client authentication. (Kafka only).
+ "mysql_settings": {
+ "auth_plugin": "str" # A string
+ specifying the authentication method to be used for
+ connections to the MySQL user account. The valid values are
+ ``mysql_native_password`` or ``caching_sha2_password``. If
+ excluded when creating a new user, the default for the
+ version of MySQL in use will be used. As of MySQL 8.0, the
+ default is ``caching_sha2_password``. Required. Known values
+ are: "mysql_native_password" and "caching_sha2_password".
+ },
+ "password": "str", # Optional. A randomly
+ generated password for the database user.:code:`
`Requires
+ ``database:view_credentials`` scope.
+ "role": "str", # Optional. A string
+ representing the database user's role. The value will be either
+ "primary" or "normal". Known values are: "primary" and "normal".
+ "settings": {
+ "acl": [
+ {
+ "permission": "str",
+ # Permission set applied to the ACL. 'consume' allows
+ for messages to be consumed from the topic. 'produce'
+ allows for messages to be published to the topic.
+ 'produceconsume' allows for both 'consume' and
+ 'produce' permission. 'admin' allows for
+ 'produceconsume' as well as any operations to
+ administer the topic (delete, update). Required.
+ Known values are: "admin", "consume", "produce", and
+ "produceconsume".
+ "topic": "str", # A
+ regex for matching the topic(s) that this ACL should
+ apply to. Required.
+ "id": "str" #
+ Optional. An identifier for the ACL. Will be computed
+ after the ACL is created/updated.
+ }
+ ],
+ "mongo_user_settings": {
+ "databases": [
+ "str" # Optional. A
+ list of databases to which the user should have
+ access. When the database is set to ``admin``"" , the
+ user will have access to all databases based on the
+ user's role i.e. a user with the role ``readOnly``
+ assigned to the ``admin`` database will have read
+ access to all databases.
+ ],
+ "role": "str" # Optional.
+ The role to assign to the user with each role mapping to
+ a MongoDB built-in role. ``readOnly`` maps to a `read
+ `_
+ role. ``readWrite`` maps to a `readWrite
+ `_
+ role. ``dbAdmin`` maps to a `dbAdmin
+ `_
+ role. Known values are: "readOnly", "readWrite", and
+ "dbAdmin".
+ },
+ "opensearch_acl": [
+ {
+ "index": "str", #
+ Optional. A regex for matching the indexes that this
+ ACL should apply to.
+ "permission": "str"
+ # Optional. Permission set applied to the ACL. 'read'
+ allows user to read from the index. 'write' allows
+ for user to write to the index. 'readwrite' allows
+ for both 'read' and 'write' permission.
+ 'deny'(default) restricts user from performing any
+ operation over an index. 'admin' allows for
+ 'readwrite' as well as any operations to administer
+ the index. Known values are: "deny", "admin", "read",
+ "readwrite", and "write".
+ }
+ ],
+ "pg_allow_replication": bool #
+ Optional. For Postgres clusters, set to ``true`` for a user
+ with replication rights. This option is not currently
+ supported for other database engines.
+ }
+ }
+ ],
+ "version": "str", # Optional. A string representing the
+ version of the database engine in use for the cluster.
+ "version_end_of_availability": "str", # Optional. A
+ timestamp referring to the date when the particular version will no
+ longer be available for creating new clusters. If null, the version does
+ not have an end of availability timeline.
+ "version_end_of_life": "str" # Optional. A timestamp
+ referring to the date when the particular version will no longer be
+ supported. If null, the version does not have an end of life timeline.
+ }
+ ]
+ }
+ # response body for status code(s): 404
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
+ """
+ error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ 401: cast(
+ Type[HttpResponseError],
+ lambda response: ClientAuthenticationError(response=response),
+ ),
+ 429: HttpResponseError,
+ 500: HttpResponseError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = kwargs.pop("params", {}) or {}
+
+ cls: ClsType[JSON] = kwargs.pop("cls", None)
+
+ _request = build_databases_list_clusters_request(
+ tag_name=tag_name,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = (
+ await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 404]:
+ if _stream:
+ await response.read() # Load the body in memory and close the socket
+ map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
+ raise HttpResponseError(response=response)
+
+ response_headers = {}
+ if response.status_code == 200:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if response.status_code == 404:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if cls:
+ return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
+
+ return cast(JSON, deserialized) # type: ignore
+
+ @overload
+ async def create_cluster(
+ self, body: JSON, *, content_type: str = "application/json", **kwargs: Any
+ ) -> JSON:
+ # pylint: disable=line-too-long
+ """Create a New Database Cluster.
+
+ To create a database cluster, send a POST request to ``/v2/databases``. To see a list of
+ options for each engine, such as available regions, size slugs, and versions, send a GET
+ request to the ``/v2/databases/options`` endpoint. The available sizes for the
+ ``storage_size_mib`` field depends on the cluster's size. To see a list of available sizes, see
+ `Managed Database Pricing `_.
+
+ The create response returns a JSON object with a key called ``database``. The value of this is
+ an object that contains the standard attributes associated with a database cluster. The initial
+ value of the database cluster's ``status`` attribute is ``creating``. When the cluster is ready
+ to receive traffic, this changes to ``online``.
+
+ The embedded ``connection`` and ``private_connection`` objects contains the information needed
+ to access the database cluster. For multi-node clusters, the ``standby_connection`` and
+ ``standby_private_connection`` objects contain the information needed to connect to the
+ cluster's standby node(s).
+
+ DigitalOcean managed PostgreSQL and MySQL database clusters take automated daily backups. To
+ create a new database cluster based on a backup of an existing cluster, send a POST request to
+ ``/v2/databases``. In addition to the standard database cluster attributes, the JSON body must
+ include a key named ``backup_restore`` with the name of the original database cluster and the
+ timestamp of the backup to be restored. Creating a database from a backup is the same as
+ forking a database in the control panel.
+ Note: Caching cluster creates are no longer supported as of 2025-04-30T00:00:00Z. Backups are
+ also not supported for Caching or Valkey clusters.
+
+ :param body: Required.
+ :type body: JSON
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: JSON object
+ :rtype: JSON
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # JSON input template you can fill out and use as your body input.
+ body = {
+ "engine": "str", # A slug representing the database engine used for the
+ cluster. The possible values are: "pg" for PostgreSQL, "mysql" for MySQL, "redis"
+ for Caching, "mongodb" for MongoDB, "kafka" for Kafka, "opensearch" for
+ OpenSearch, and "valkey" for Valkey. Required. Known values are: "pg", "mysql",
+ "redis", "valkey", "mongodb", "kafka", and "opensearch".
+ "name": "str", # A unique, human-readable name referring to a database
+ cluster. Required.
+ "num_nodes": 0, # The number of nodes in the database cluster. Required.
+ "region": "str", # The slug identifier for the region where the database
+ cluster is located. Required.
+ "size": "str", # The slug identifier representing the size of the nodes in
+ the database cluster. Required.
+ "autoscale": {
+ "storage": {
+ "enabled": bool, # Whether storage autoscaling is enabled
+ for the cluster. Required.
+ "increment_gib": 0, # Optional. The amount of additional
+ storage to add (in GiB) when autoscaling is triggered.
+ "threshold_percent": 0 # Optional. The storage usage
+ threshold percentage that triggers autoscaling. When storage usage
+ exceeds this percentage, additional storage will be added automatically.
+ }
+ },
+ "backup_restore": {
+ "database_name": "str", # The name of an existing database cluster
+ from which the backup will be restored. Required.
+ "backup_created_at": "2020-02-20 00:00:00" # Optional. The timestamp
+ of an existing database cluster backup in ISO8601 combined date and time
+ format. The most recent backup will be used if excluded.
+ },
+ "connection": {
+ "database": "str", # Optional. The name of the default database.
+ "host": "str", # Optional. The FQDN pointing to the database
+ cluster's current primary node.
+ "password": "str", # Optional. The randomly generated password for
+ the default user.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ "port": 0, # Optional. The port on which the database cluster is
+ listening.
+ "ssl": bool, # Optional. A boolean value indicating if the
+ connection should be made over SSL.
+ "uri": "str", # Optional. A connection string in the format accepted
+ by the ``psql`` command. This is provided as a convenience and should be able
+ to be constructed by the other attributes.
+ "user": "str" # Optional. The default user for the
+ database.:code:`
`:code:`
`Requires ``database:view_credentials``
+ scope.
+ },
+ "created_at": "2020-02-20 00:00:00", # Optional. A time value given in
+ ISO8601 combined date and time format that represents when the database cluster
+ was created.
+ "db_names": [
+ "str" # Optional. An array of strings containing the names of
+ databases created in the database cluster.
+ ],
+ "do_settings": {
+ "service_cnames": [
+ "str" # Optional. An array of custom CNAMEs for the database
+ cluster. Each CNAME must be a valid RFC 1123 hostname (e.g.,
+ "db.example.com"). Maximum of 16 CNAMEs allowed, each up to 253
+ characters.
+ ]
+ },
+ "id": "str", # Optional. A unique ID that can be used to identify and
+ reference a database cluster.
+ "maintenance_window": {
+ "day": "str", # The day of the week on which to apply maintenance
+ updates. Required.
+ "hour": "str", # The hour in UTC at which maintenance updates will
+ be applied in 24 hour format. Required.
+ "description": [
+ "str" # Optional. A list of strings, each containing
+ information about a pending maintenance update.
+ ],
+ "pending": bool # Optional. A boolean value indicating whether any
+ maintenance is scheduled to be performed in the next window.
+ },
+ "metrics_endpoints": [
+ {
+ "host": "str", # Optional. A FQDN pointing to the database
+ cluster's node(s).
+ "port": 0 # Optional. The port on which a service is
+ listening.
+ }
+ ],
+ "private_connection": {
+ "database": "str", # Optional. The name of the default database.
+ "host": "str", # Optional. The FQDN pointing to the database
+ cluster's current primary node.
+ "password": "str", # Optional. The randomly generated password for
+ the default user.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ "port": 0, # Optional. The port on which the database cluster is
+ listening.
+ "ssl": bool, # Optional. A boolean value indicating if the
+ connection should be made over SSL.
+ "uri": "str", # Optional. A connection string in the format accepted
+ by the ``psql`` command. This is provided as a convenience and should be able
+ to be constructed by the other attributes.
+ "user": "str" # Optional. The default user for the
+ database.:code:`
`:code:`
`Requires ``database:view_credentials``
+ scope.
+ },
+ "private_network_uuid": "str", # Optional. A string specifying the UUID of
+ the VPC to which the database cluster will be assigned. If excluded, the cluster
+ when creating a new database cluster, it will be assigned to your account's
+ default VPC for the region. :code:`
`:code:`
`Requires ``vpc:read`` scope.
+ "project_id": "str", # Optional. The ID of the project that the database
+ cluster is assigned to. If excluded when creating a new database cluster, it will
+ be assigned to your default project.:code:`
`:code:`
`Requires
+ ``project:update`` scope.
+ "rules": [
+ {
+ "type": "str", # The type of resource that the firewall rule
+ allows to access the database cluster. Required. Known values are:
+ "droplet", "k8s", "ip_addr", "tag", and "app".
+ "value": "str", # The ID of the specific resource, the name
+ of a tag applied to a group of resources, or the IP address that the
+ firewall rule allows to access the database cluster. Required.
+ "cluster_uuid": "str", # Optional. A unique ID for the
+ database cluster to which the rule is applied.
+ "created_at": "2020-02-20 00:00:00", # Optional. A time
+ value given in ISO8601 combined date and time format that represents when
+ the firewall rule was created.
+ "description": "str", # Optional. A human-readable
+ description of the rule.
+ "uuid": "str" # Optional. A unique ID for the firewall rule
+ itself.
+ }
+ ],
+ "schema_registry_connection": {
+ "host": "str", # Optional. The FQDN pointing to the schema registry
+ connection uri.
+ "password": "str", # Optional. The randomly generated password for
+ the schema registry.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ "port": 0, # Optional. The port on which the schema registry is
+ listening.
+ "ssl": bool, # Optional. A boolean value indicating if the
+ connection should be made over SSL.
+ "uri": "str", # Optional. This is provided as a convenience and
+ should be able to be constructed by the other attributes.
+ "user": "str" # Optional. The default user for the schema
+ registry.:code:`
`:code:`
`Requires ``database:view_credentials``
+ scope.
+ },
+ "semantic_version": "str", # Optional. A string representing the semantic
+ version of the database engine in use for the cluster.
+ "standby_connection": {
+ "database": "str", # Optional. The name of the default database.
+ "host": "str", # Optional. The FQDN pointing to the database
+ cluster's current primary node.
+ "password": "str", # Optional. The randomly generated password for
+ the default user.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ "port": 0, # Optional. The port on which the database cluster is
+ listening.
+ "ssl": bool, # Optional. A boolean value indicating if the
+ connection should be made over SSL.
+ "uri": "str", # Optional. A connection string in the format accepted
+ by the ``psql`` command. This is provided as a convenience and should be able
+ to be constructed by the other attributes.
+ "user": "str" # Optional. The default user for the
+ database.:code:`
`:code:`
`Requires ``database:view_credentials``
+ scope.
+ },
+ "standby_private_connection": {
+ "database": "str", # Optional. The name of the default database.
+ "host": "str", # Optional. The FQDN pointing to the database
+ cluster's current primary node.
+ "password": "str", # Optional. The randomly generated password for
+ the default user.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ "port": 0, # Optional. The port on which the database cluster is
+ listening.
+ "ssl": bool, # Optional. A boolean value indicating if the
+ connection should be made over SSL.
+ "uri": "str", # Optional. A connection string in the format accepted
+ by the ``psql`` command. This is provided as a convenience and should be able
+ to be constructed by the other attributes.
+ "user": "str" # Optional. The default user for the
+ database.:code:`
`:code:`
`Requires ``database:view_credentials``
+ scope.
+ },
+ "status": "str", # Optional. A string representing the current status of the
+ database cluster. Known values are: "creating", "online", "resizing",
+ "migrating", and "forking".
+ "storage_size_mib": 0, # Optional. Additional storage added to the cluster,
+ in MiB. If null, no additional storage is added to the cluster, beyond what is
+ provided as a base amount from the 'size' and any previously added additional
+ storage.
+ "tags": [
+ "str" # Optional. An array of tags (as strings) to apply to the
+ database cluster. :code:`
`:code:`
`Requires ``tag:create`` scope.
+ ],
+ "ui_connection": {
+ "host": "str", # Optional. The FQDN pointing to the opensearch
+ cluster's current primary node.
+ "password": "str", # Optional. The randomly generated password for
+ the default user.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ "port": 0, # Optional. The port on which the opensearch dashboard is
+ listening.
+ "ssl": bool, # Optional. A boolean value indicating if the
+ connection should be made over SSL.
+ "uri": "str", # Optional. This is provided as a convenience and
+ should be able to be constructed by the other attributes.
+ "user": "str" # Optional. The default user for the opensearch
+ dashboard.:code:`
`:code:`
`Requires ``database:view_credentials``
+ scope.
+ },
+ "users": [
+ {
+ "name": "str", # The name of a database user. Required.
+ "access_cert": "str", # Optional. Access certificate for TLS
+ client authentication. (Kafka only).
+ "access_key": "str", # Optional. Access key for TLS client
+ authentication. (Kafka only).
+ "mysql_settings": {
+ "auth_plugin": "str" # A string specifying the
+ authentication method to be used for connections to the MySQL user
+ account. The valid values are ``mysql_native_password`` or
+ ``caching_sha2_password``. If excluded when creating a new user, the
+ default for the version of MySQL in use will be used. As of MySQL
+ 8.0, the default is ``caching_sha2_password``. Required. Known values
+ are: "mysql_native_password" and "caching_sha2_password".
+ },
+ "password": "str", # Optional. A randomly generated password
+ for the database user.:code:`
`Requires ``database:view_credentials``
+ scope.
+ "role": "str", # Optional. A string representing the
+ database user's role. The value will be either "primary" or "normal".
+ Known values are: "primary" and "normal".
+ "settings": {
+ "acl": [
+ {
+ "permission": "str", # Permission
+ set applied to the ACL. 'consume' allows for messages to be
+ consumed from the topic. 'produce' allows for messages to be
+ published to the topic. 'produceconsume' allows for both
+ 'consume' and 'produce' permission. 'admin' allows for
+ 'produceconsume' as well as any operations to administer the
+ topic (delete, update). Required. Known values are: "admin",
+ "consume", "produce", and "produceconsume".
+ "topic": "str", # A regex for
+ matching the topic(s) that this ACL should apply to.
+ Required.
+ "id": "str" # Optional. An
+ identifier for the ACL. Will be computed after the ACL is
+ created/updated.
+ }
+ ],
+ "mongo_user_settings": {
+ "databases": [
+ "str" # Optional. A list of
+ databases to which the user should have access. When the
+                                    database is set to ``admin``, the user will have access to
+ all databases based on the user's role i.e. a user with the
+ role ``readOnly`` assigned to the ``admin`` database will
+ have read access to all databases.
+ ],
+ "role": "str" # Optional. The role to assign
+ to the user with each role mapping to a MongoDB built-in role.
+                                    ``readOnly`` maps to a `read
+                                    <https://www.mongodb.com/docs/manual/reference/built-in-roles/#mongodb-authrole-read>`_
+                                    role. ``readWrite`` maps to a `readWrite
+                                    <https://www.mongodb.com/docs/manual/reference/built-in-roles/#mongodb-authrole-readWrite>`_
+                                    role. ``dbAdmin`` maps to a `dbAdmin
+                                    <https://www.mongodb.com/docs/manual/reference/built-in-roles/#mongodb-authrole-dbAdmin>`_
+ role. Known values are: "readOnly", "readWrite", and "dbAdmin".
+ },
+ "opensearch_acl": [
+ {
+ "index": "str", # Optional. A regex
+ for matching the indexes that this ACL should apply to.
+ "permission": "str" # Optional.
+ Permission set applied to the ACL. 'read' allows user to read
+ from the index. 'write' allows for user to write to the
+ index. 'readwrite' allows for both 'read' and 'write'
+ permission. 'deny'(default) restricts user from performing
+ any operation over an index. 'admin' allows for 'readwrite'
+ as well as any operations to administer the index. Known
+ values are: "deny", "admin", "read", "readwrite", and
+ "write".
+ }
+ ],
+ "pg_allow_replication": bool # Optional. For
+ Postgres clusters, set to ``true`` for a user with replication
+ rights. This option is not currently supported for other database
+ engines.
+ }
+ }
+ ],
+ "version": "str", # Optional. A string representing the version of the
+ database engine in use for the cluster.
+ "version_end_of_availability": "str", # Optional. A timestamp referring to
+ the date when the particular version will no longer be available for creating new
+ clusters. If null, the version does not have an end of availability timeline.
+ "version_end_of_life": "str" # Optional. A timestamp referring to the date
+ when the particular version will no longer be supported. If null, the version
+ does not have an end of life timeline.
+ }
+
+ # response body for status code(s): 201
+ response == {
+ "database": {
+ "engine": "str", # A slug representing the database engine used for
+ the cluster. The possible values are: "pg" for PostgreSQL, "mysql" for MySQL,
+ "redis" for Caching, "mongodb" for MongoDB, "kafka" for Kafka, "opensearch"
+ for OpenSearch, and "valkey" for Valkey. Required. Known values are: "pg",
+ "mysql", "redis", "valkey", "mongodb", "kafka", and "opensearch".
+ "name": "str", # A unique, human-readable name referring to a
+ database cluster. Required.
+ "num_nodes": 0, # The number of nodes in the database cluster.
+ Required.
+ "region": "str", # The slug identifier for the region where the
+ database cluster is located. Required.
+ "size": "str", # The slug identifier representing the size of the
+ nodes in the database cluster. Required.
+ "connection": {
+ "database": "str", # Optional. The name of the default
+ database.
+ "host": "str", # Optional. The FQDN pointing to the database
+ cluster's current primary node.
+ "password": "str", # Optional. The randomly generated
+ password for the default user.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ "port": 0, # Optional. The port on which the database
+ cluster is listening.
+ "ssl": bool, # Optional. A boolean value indicating if the
+ connection should be made over SSL.
+ "uri": "str", # Optional. A connection string in the format
+ accepted by the ``psql`` command. This is provided as a convenience and
+ should be able to be constructed by the other attributes.
+ "user": "str" # Optional. The default user for the
+ database.:code:`
`:code:`
`Requires ``database:view_credentials``
+ scope.
+ },
+ "created_at": "2020-02-20 00:00:00", # Optional. A time value given
+ in ISO8601 combined date and time format that represents when the database
+ cluster was created.
+ "db_names": [
+ "str" # Optional. An array of strings containing the names
+ of databases created in the database cluster.
+ ],
+ "do_settings": {
+ "service_cnames": [
+ "str" # Optional. An array of custom CNAMEs for the
+ database cluster. Each CNAME must be a valid RFC 1123 hostname (e.g.,
+ "db.example.com"). Maximum of 16 CNAMEs allowed, each up to 253
+ characters.
+ ]
+ },
+ "id": "str", # Optional. A unique ID that can be used to identify
+ and reference a database cluster.
+ "maintenance_window": {
+ "day": "str", # The day of the week on which to apply
+ maintenance updates. Required.
+ "hour": "str", # The hour in UTC at which maintenance
+ updates will be applied in 24 hour format. Required.
+ "description": [
+ "str" # Optional. A list of strings, each containing
+ information about a pending maintenance update.
+ ],
+ "pending": bool # Optional. A boolean value indicating
+ whether any maintenance is scheduled to be performed in the next window.
+ },
+ "metrics_endpoints": [
{
- "end_of_availability": "str", # Optional. A
- timestamp referring to the date when the particular version will no
- longer be available for creating new clusters. If null, the version
- does not have an end of availability timeline.
- "end_of_life": "str", # Optional. A timestamp
- referring to the date when the particular version will no longer be
- supported. If null, the version does not have an end of life
- timeline.
- "version": "str" # Optional. The engine version.
+ "host": "str", # Optional. A FQDN pointing to the
+ database cluster's node(s).
+ "port": 0 # Optional. The port on which a service is
+ listening.
}
],
- "opensearch": [
+ "private_connection": {
+ "database": "str", # Optional. The name of the default
+ database.
+ "host": "str", # Optional. The FQDN pointing to the database
+ cluster's current primary node.
+ "password": "str", # Optional. The randomly generated
+ password for the default user.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ "port": 0, # Optional. The port on which the database
+ cluster is listening.
+ "ssl": bool, # Optional. A boolean value indicating if the
+ connection should be made over SSL.
+ "uri": "str", # Optional. A connection string in the format
+ accepted by the ``psql`` command. This is provided as a convenience and
+ should be able to be constructed by the other attributes.
+ "user": "str" # Optional. The default user for the
+ database.:code:`
`:code:`
`Requires ``database:view_credentials``
+ scope.
+ },
+ "private_network_uuid": "str", # Optional. A string specifying the
+                UUID of the VPC to which the database cluster will be assigned. If excluded
+                when creating a new database cluster, it will be assigned to your
+ account's default VPC for the region. :code:`
`:code:`
`Requires
+ ``vpc:read`` scope.
+ "project_id": "str", # Optional. The ID of the project that the
+ database cluster is assigned to. If excluded when creating a new database
+ cluster, it will be assigned to your default
+ project.:code:`
`:code:`
`Requires ``project:read`` scope.
+ "rules": [
{
- "end_of_availability": "str", # Optional. A
- timestamp referring to the date when the particular version will no
- longer be available for creating new clusters. If null, the version
- does not have an end of availability timeline.
- "end_of_life": "str", # Optional. A timestamp
- referring to the date when the particular version will no longer be
- supported. If null, the version does not have an end of life
- timeline.
- "version": "str" # Optional. The engine version.
+ "type": "str", # The type of resource that the
+ firewall rule allows to access the database cluster. Required. Known
+ values are: "droplet", "k8s", "ip_addr", "tag", and "app".
+ "value": "str", # The ID of the specific resource,
+ the name of a tag applied to a group of resources, or the IP address
+ that the firewall rule allows to access the database cluster.
+ Required.
+ "cluster_uuid": "str", # Optional. A unique ID for
+ the database cluster to which the rule is applied.
+ "created_at": "2020-02-20 00:00:00", # Optional. A
+ time value given in ISO8601 combined date and time format that
+ represents when the firewall rule was created.
+ "description": "str", # Optional. A human-readable
+ description of the rule.
+ "uuid": "str" # Optional. A unique ID for the
+ firewall rule itself.
}
],
- "pg": [
- {
- "end_of_availability": "str", # Optional. A
- timestamp referring to the date when the particular version will no
- longer be available for creating new clusters. If null, the version
- does not have an end of availability timeline.
- "end_of_life": "str", # Optional. A timestamp
- referring to the date when the particular version will no longer be
- supported. If null, the version does not have an end of life
- timeline.
- "version": "str" # Optional. The engine version.
- }
+ "schema_registry_connection": {
+ "host": "str", # Optional. The FQDN pointing to the schema
+ registry connection uri.
+ "password": "str", # Optional. The randomly generated
+ password for the schema registry.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ "port": 0, # Optional. The port on which the schema registry
+ is listening.
+ "ssl": bool, # Optional. A boolean value indicating if the
+ connection should be made over SSL.
+ "uri": "str", # Optional. This is provided as a convenience
+ and should be able to be constructed by the other attributes.
+ "user": "str" # Optional. The default user for the schema
+ registry.:code:`
`:code:`
`Requires ``database:view_credentials``
+ scope.
+ },
+ "semantic_version": "str", # Optional. A string representing the
+ semantic version of the database engine in use for the cluster.
+ "standby_connection": {
+ "database": "str", # Optional. The name of the default
+ database.
+ "host": "str", # Optional. The FQDN pointing to the database
+ cluster's current primary node.
+ "password": "str", # Optional. The randomly generated
+ password for the default user.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ "port": 0, # Optional. The port on which the database
+ cluster is listening.
+ "ssl": bool, # Optional. A boolean value indicating if the
+ connection should be made over SSL.
+ "uri": "str", # Optional. A connection string in the format
+ accepted by the ``psql`` command. This is provided as a convenience and
+ should be able to be constructed by the other attributes.
+ "user": "str" # Optional. The default user for the
+ database.:code:`
`:code:`
`Requires ``database:view_credentials``
+ scope.
+ },
+ "standby_private_connection": {
+ "database": "str", # Optional. The name of the default
+ database.
+ "host": "str", # Optional. The FQDN pointing to the database
+ cluster's current primary node.
+ "password": "str", # Optional. The randomly generated
+ password for the default user.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ "port": 0, # Optional. The port on which the database
+ cluster is listening.
+ "ssl": bool, # Optional. A boolean value indicating if the
+ connection should be made over SSL.
+ "uri": "str", # Optional. A connection string in the format
+ accepted by the ``psql`` command. This is provided as a convenience and
+ should be able to be constructed by the other attributes.
+ "user": "str" # Optional. The default user for the
+ database.:code:`
`:code:`
`Requires ``database:view_credentials``
+ scope.
+ },
+ "status": "str", # Optional. A string representing the current
+ status of the database cluster. Known values are: "creating", "online",
+ "resizing", "migrating", and "forking".
+ "storage_size_mib": 0, # Optional. Additional storage added to the
+ cluster, in MiB. If null, no additional storage is added to the cluster,
+ beyond what is provided as a base amount from the 'size' and any previously
+ added additional storage.
+ "tags": [
+ "str" # Optional. An array of tags that have been applied to
+ the database cluster. :code:`
`:code:`
`Requires ``tag:read``
+ scope.
],
- "redis": [
+ "ui_connection": {
+ "host": "str", # Optional. The FQDN pointing to the
+ opensearch cluster's current primary node.
+ "password": "str", # Optional. The randomly generated
+ password for the default user.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ "port": 0, # Optional. The port on which the opensearch
+ dashboard is listening.
+ "ssl": bool, # Optional. A boolean value indicating if the
+ connection should be made over SSL.
+ "uri": "str", # Optional. This is provided as a convenience
+ and should be able to be constructed by the other attributes.
+ "user": "str" # Optional. The default user for the
+ opensearch dashboard.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ },
+ "users": [
{
- "end_of_availability": "str", # Optional. A
- timestamp referring to the date when the particular version will no
- longer be available for creating new clusters. If null, the version
- does not have an end of availability timeline.
- "end_of_life": "str", # Optional. A timestamp
- referring to the date when the particular version will no longer be
- supported. If null, the version does not have an end of life
- timeline.
- "version": "str" # Optional. The engine version.
+ "name": "str", # The name of a database user.
+ Required.
+ "access_cert": "str", # Optional. Access certificate
+ for TLS client authentication. (Kafka only).
+ "access_key": "str", # Optional. Access key for TLS
+ client authentication. (Kafka only).
+ "mysql_settings": {
+ "auth_plugin": "str" # A string specifying
+ the authentication method to be used for connections to the MySQL
+ user account. The valid values are ``mysql_native_password`` or
+ ``caching_sha2_password``. If excluded when creating a new user,
+ the default for the version of MySQL in use will be used. As of
+ MySQL 8.0, the default is ``caching_sha2_password``. Required.
+ Known values are: "mysql_native_password" and
+ "caching_sha2_password".
+ },
+ "password": "str", # Optional. A randomly generated
+ password for the database user.:code:`
`Requires
+ ``database:view_credentials`` scope.
+ "role": "str", # Optional. A string representing the
+ database user's role. The value will be either "primary" or "normal".
+ Known values are: "primary" and "normal".
+ "settings": {
+ "acl": [
+ {
+ "permission": "str", #
+ Permission set applied to the ACL. 'consume' allows for
+ messages to be consumed from the topic. 'produce' allows
+ for messages to be published to the topic.
+ 'produceconsume' allows for both 'consume' and 'produce'
+ permission. 'admin' allows for 'produceconsume' as well
+ as any operations to administer the topic (delete,
+ update). Required. Known values are: "admin", "consume",
+ "produce", and "produceconsume".
+ "topic": "str", # A regex
+ for matching the topic(s) that this ACL should apply to.
+ Required.
+ "id": "str" # Optional. An
+ identifier for the ACL. Will be computed after the ACL is
+ created/updated.
+ }
+ ],
+ "mongo_user_settings": {
+ "databases": [
+ "str" # Optional. A list of
+ databases to which the user should have access. When the
+                                    database is set to ``admin``, the user will have
+ access to all databases based on the user's role i.e. a
+ user with the role ``readOnly`` assigned to the ``admin``
+ database will have read access to all databases.
+ ],
+ "role": "str" # Optional. The role
+ to assign to the user with each role mapping to a MongoDB
+ built-in role. ``readOnly`` maps to a `read
+                                    <https://www.mongodb.com/docs/manual/reference/built-in-roles/#mongodb-authrole-read>`_
+                                    role. ``readWrite`` maps to a `readWrite
+                                    <https://www.mongodb.com/docs/manual/reference/built-in-roles/#mongodb-authrole-readWrite>`_
+                                    role. ``dbAdmin`` maps to a `dbAdmin
+                                    <https://www.mongodb.com/docs/manual/reference/built-in-roles/#mongodb-authrole-dbAdmin>`_
+ role. Known values are: "readOnly", "readWrite", and
+ "dbAdmin".
+ },
+ "opensearch_acl": [
+ {
+ "index": "str", # Optional.
+ A regex for matching the indexes that this ACL should
+ apply to.
+ "permission": "str" #
+ Optional. Permission set applied to the ACL. 'read'
+ allows user to read from the index. 'write' allows for
+ user to write to the index. 'readwrite' allows for both
+ 'read' and 'write' permission. 'deny'(default) restricts
+ user from performing any operation over an index. 'admin'
+ allows for 'readwrite' as well as any operations to
+ administer the index. Known values are: "deny", "admin",
+ "read", "readwrite", and "write".
+ }
+ ],
+ "pg_allow_replication": bool # Optional. For
+ Postgres clusters, set to ``true`` for a user with replication
+ rights. This option is not currently supported for other database
+ engines.
+ }
}
],
- "valkey": [
- {
- "end_of_availability": "str", # Optional. A
- timestamp referring to the date when the particular version will no
- longer be available for creating new clusters. If null, the version
- does not have an end of availability timeline.
- "end_of_life": "str", # Optional. A timestamp
- referring to the date when the particular version will no longer be
- supported. If null, the version does not have an end of life
- timeline.
- "version": "str" # Optional. The engine version.
- }
- ]
+ "version": "str", # Optional. A string representing the version of
+ the database engine in use for the cluster.
+ "version_end_of_availability": "str", # Optional. A timestamp
+ referring to the date when the particular version will no longer be available
+ for creating new clusters. If null, the version does not have an end of
+ availability timeline.
+ "version_end_of_life": "str" # Optional. A timestamp referring to
+ the date when the particular version will no longer be supported. If null,
+ the version does not have an end of life timeline.
}
}
# response body for status code(s): 404
@@ -104752,109 +111271,44 @@ async def list_options(self, **kwargs: Any) -> JSON:
tickets to help identify the issue.
}
"""
- error_map: MutableMapping[int, Type[HttpResponseError]] = {
- 404: ResourceNotFoundError,
- 409: ResourceExistsError,
- 304: ResourceNotModifiedError,
- 401: cast(
- Type[HttpResponseError],
- lambda response: ClientAuthenticationError(response=response),
- ),
- 429: HttpResponseError,
- 500: HttpResponseError,
- }
- error_map.update(kwargs.pop("error_map", {}) or {})
-
- _headers = kwargs.pop("headers", {}) or {}
- _params = kwargs.pop("params", {}) or {}
-
- cls: ClsType[JSON] = kwargs.pop("cls", None)
-
- _request = build_databases_list_options_request(
- headers=_headers,
- params=_params,
- )
- _request.url = self._client.format_url(_request.url)
-
- _stream = False
- pipeline_response: PipelineResponse = (
- await self._client._pipeline.run( # pylint: disable=protected-access
- _request, stream=_stream, **kwargs
- )
- )
-
- response = pipeline_response.http_response
-
- if response.status_code not in [200, 404]:
- if _stream:
- await response.read() # Load the body in memory and close the socket
- map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
- raise HttpResponseError(response=response)
-
- response_headers = {}
- if response.status_code == 200:
- response_headers["ratelimit-limit"] = self._deserialize(
- "int", response.headers.get("ratelimit-limit")
- )
- response_headers["ratelimit-remaining"] = self._deserialize(
- "int", response.headers.get("ratelimit-remaining")
- )
- response_headers["ratelimit-reset"] = self._deserialize(
- "int", response.headers.get("ratelimit-reset")
- )
-
- if response.content:
- deserialized = response.json()
- else:
- deserialized = None
-
- if response.status_code == 404:
- response_headers["ratelimit-limit"] = self._deserialize(
- "int", response.headers.get("ratelimit-limit")
- )
- response_headers["ratelimit-remaining"] = self._deserialize(
- "int", response.headers.get("ratelimit-remaining")
- )
- response_headers["ratelimit-reset"] = self._deserialize(
- "int", response.headers.get("ratelimit-reset")
- )
-
- if response.content:
- deserialized = response.json()
- else:
- deserialized = None
-
- if cls:
- return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
-
- return cast(JSON, deserialized) # type: ignore
- @distributed_trace_async
- async def list_clusters(
- self, *, tag_name: Optional[str] = None, **kwargs: Any
+ @overload
+ async def create_cluster(
+ self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
) -> JSON:
# pylint: disable=line-too-long
- """List All Database Clusters.
+ """Create a New Database Cluster.
- To list all of the database clusters available on your account, send a GET request to
- ``/v2/databases``. To limit the results to database clusters with a specific tag, include the
- ``tag_name`` query parameter set to the name of the tag. For example,
- ``/v2/databases?tag_name=$TAG_NAME``.
+ To create a database cluster, send a POST request to ``/v2/databases``. To see a list of
+ options for each engine, such as available regions, size slugs, and versions, send a GET
+ request to the ``/v2/databases/options`` endpoint. The available sizes for the
+ ``storage_size_mib`` field depends on the cluster's size. To see a list of available sizes, see
+    `Managed Database Pricing <https://www.digitalocean.com/pricing/managed-databases>`_.
- The result will be a JSON object with a ``databases`` key. This will be set to an array of
- database objects, each of which will contain the standard database attributes.
+ The create response returns a JSON object with a key called ``database``. The value of this is
+ an object that contains the standard attributes associated with a database cluster. The initial
+ value of the database cluster's ``status`` attribute is ``creating``. When the cluster is ready
+ to receive traffic, this changes to ``online``.
- The embedded ``connection`` and ``private_connection`` objects will contain the information
- needed to access the database cluster. For multi-node clusters, the ``standby_connection`` and
- ``standby_private_connection`` objects will contain the information needed to connect to the
+    The embedded ``connection`` and ``private_connection`` objects contain the information needed
+ to access the database cluster. For multi-node clusters, the ``standby_connection`` and
+ ``standby_private_connection`` objects contain the information needed to connect to the
cluster's standby node(s).
- The embedded ``maintenance_window`` object will contain information about any scheduled
- maintenance for the database cluster.
+ DigitalOcean managed PostgreSQL and MySQL database clusters take automated daily backups. To
+ create a new database cluster based on a backup of an existing cluster, send a POST request to
+ ``/v2/databases``. In addition to the standard database cluster attributes, the JSON body must
+ include a key named ``backup_restore`` with the name of the original database cluster and the
+ timestamp of the backup to be restored. Creating a database from a backup is the same as
+ forking a database in the control panel.
+ Note: Caching cluster creates are no longer supported as of 2025-04-30T00:00:00Z. Backups are
+ also not supported for Caching or Valkey clusters.
- :keyword tag_name: Limits the results to database clusters with a specific
- tag.:code:`
`:code:`
`Requires ``tag:read`` scope. Default value is None.
- :paramtype tag_name: str
+ :param body: Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
:return: JSON object
:rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
@@ -104862,327 +111316,307 @@ async def list_clusters(
Example:
.. code-block:: python
- # response body for status code(s): 200
+ # response body for status code(s): 201
response == {
- "databases": [
- {
- "engine": "str", # A slug representing the database engine
- used for the cluster. The possible values are: "pg" for PostgreSQL,
- "mysql" for MySQL, "redis" for Caching, "mongodb" for MongoDB, "kafka"
- for Kafka, "opensearch" for OpenSearch, and "valkey" for Valkey.
- Required. Known values are: "pg", "mysql", "redis", "valkey", "mongodb",
- "kafka", and "opensearch".
- "name": "str", # A unique, human-readable name referring to
- a database cluster. Required.
- "num_nodes": 0, # The number of nodes in the database
- cluster. Required.
- "region": "str", # The slug identifier for the region where
- the database cluster is located. Required.
- "size": "str", # The slug identifier representing the size
- of the nodes in the database cluster. Required.
- "connection": {
- "database": "str", # Optional. The name of the
- default database.
- "host": "str", # Optional. The FQDN pointing to the
- database cluster's current primary node.
- "password": "str", # Optional. The randomly
- generated password for the default
- user.:code:`
`:code:`
`Requires ``database:view_credentials``
- scope.
- "port": 0, # Optional. The port on which the
- database cluster is listening.
- "ssl": bool, # Optional. A boolean value indicating
- if the connection should be made over SSL.
- "uri": "str", # Optional. A connection string in the
- format accepted by the ``psql`` command. This is provided as a
- convenience and should be able to be constructed by the other
- attributes.
- "user": "str" # Optional. The default user for the
- database.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- },
- "created_at": "2020-02-20 00:00:00", # Optional. A time
- value given in ISO8601 combined date and time format that represents when
- the database cluster was created.
- "db_names": [
- "str" # Optional. An array of strings containing the
- names of databases created in the database cluster.
- ],
- "do_settings": {
- "service_cnames": [
- "str" # Optional. An array of custom CNAMEs
- for the database cluster. Each CNAME must be a valid RFC 1123
- hostname (e.g., "db.example.com"). Maximum of 16 CNAMEs allowed,
- each up to 253 characters.
- ]
- },
- "id": "str", # Optional. A unique ID that can be used to
- identify and reference a database cluster.
- "maintenance_window": {
- "day": "str", # The day of the week on which to
- apply maintenance updates. Required.
- "hour": "str", # The hour in UTC at which
- maintenance updates will be applied in 24 hour format. Required.
- "description": [
- "str" # Optional. A list of strings, each
- containing information about a pending maintenance update.
- ],
- "pending": bool # Optional. A boolean value
- indicating whether any maintenance is scheduled to be performed in
- the next window.
- },
- "metrics_endpoints": [
- {
- "host": "str", # Optional. A FQDN pointing
- to the database cluster's node(s).
- "port": 0 # Optional. The port on which a
- service is listening.
- }
- ],
- "private_connection": {
- "database": "str", # Optional. The name of the
- default database.
- "host": "str", # Optional. The FQDN pointing to the
- database cluster's current primary node.
- "password": "str", # Optional. The randomly
- generated password for the default
- user.:code:`
`:code:`
`Requires ``database:view_credentials``
- scope.
- "port": 0, # Optional. The port on which the
- database cluster is listening.
- "ssl": bool, # Optional. A boolean value indicating
- if the connection should be made over SSL.
- "uri": "str", # Optional. A connection string in the
- format accepted by the ``psql`` command. This is provided as a
- convenience and should be able to be constructed by the other
- attributes.
- "user": "str" # Optional. The default user for the
- database.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- },
- "private_network_uuid": "str", # Optional. A string
- specifying the UUID of the VPC to which the database cluster will be
- assigned. If excluded, the cluster when creating a new database cluster,
- it will be assigned to your account's default VPC for the region.
- :code:`
`:code:`
`Requires ``vpc:read`` scope.
- "project_id": "str", # Optional. The ID of the project that
- the database cluster is assigned to. If excluded when creating a new
- database cluster, it will be assigned to your default
- project.:code:`
`:code:`
`Requires ``project:read`` scope.
- "rules": [
- {
- "type": "str", # The type of resource that
- the firewall rule allows to access the database cluster.
- Required. Known values are: "droplet", "k8s", "ip_addr", "tag",
- and "app".
- "value": "str", # The ID of the specific
- resource, the name of a tag applied to a group of resources, or
- the IP address that the firewall rule allows to access the
- database cluster. Required.
- "cluster_uuid": "str", # Optional. A unique
- ID for the database cluster to which the rule is applied.
- "created_at": "2020-02-20 00:00:00", #
- Optional. A time value given in ISO8601 combined date and time
- format that represents when the firewall rule was created.
- "description": "str", # Optional. A
- human-readable description of the rule.
- "uuid": "str" # Optional. A unique ID for
- the firewall rule itself.
- }
- ],
- "schema_registry_connection": {
- "host": "str", # Optional. The FQDN pointing to the
- schema registry connection uri.
- "password": "str", # Optional. The randomly
- generated password for the schema
- registry.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- "port": 0, # Optional. The port on which the schema
- registry is listening.
- "ssl": bool, # Optional. A boolean value indicating
- if the connection should be made over SSL.
- "uri": "str", # Optional. This is provided as a
- convenience and should be able to be constructed by the other
- attributes.
- "user": "str" # Optional. The default user for the
- schema registry.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- },
- "semantic_version": "str", # Optional. A string representing
- the semantic version of the database engine in use for the cluster.
- "standby_connection": {
- "database": "str", # Optional. The name of the
- default database.
- "host": "str", # Optional. The FQDN pointing to the
- database cluster's current primary node.
- "password": "str", # Optional. The randomly
- generated password for the default
- user.:code:`
`:code:`
`Requires ``database:view_credentials``
- scope.
- "port": 0, # Optional. The port on which the
- database cluster is listening.
- "ssl": bool, # Optional. A boolean value indicating
- if the connection should be made over SSL.
- "uri": "str", # Optional. A connection string in the
- format accepted by the ``psql`` command. This is provided as a
- convenience and should be able to be constructed by the other
- attributes.
- "user": "str" # Optional. The default user for the
- database.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- },
- "standby_private_connection": {
- "database": "str", # Optional. The name of the
- default database.
- "host": "str", # Optional. The FQDN pointing to the
- database cluster's current primary node.
- "password": "str", # Optional. The randomly
- generated password for the default
- user.:code:`
`:code:`
`Requires ``database:view_credentials``
- scope.
- "port": 0, # Optional. The port on which the
- database cluster is listening.
- "ssl": bool, # Optional. A boolean value indicating
- if the connection should be made over SSL.
- "uri": "str", # Optional. A connection string in the
- format accepted by the ``psql`` command. This is provided as a
- convenience and should be able to be constructed by the other
- attributes.
- "user": "str" # Optional. The default user for the
- database.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- },
- "status": "str", # Optional. A string representing the
- current status of the database cluster. Known values are: "creating",
- "online", "resizing", "migrating", and "forking".
- "storage_size_mib": 0, # Optional. Additional storage added
- to the cluster, in MiB. If null, no additional storage is added to the
- cluster, beyond what is provided as a base amount from the 'size' and any
- previously added additional storage.
- "tags": [
- "str" # Optional. An array of tags that have been
- applied to the database cluster. :code:`
`:code:`
`Requires
- ``tag:read`` scope.
+ "database": {
+ "engine": "str", # A slug representing the database engine used for
+ the cluster. The possible values are: "pg" for PostgreSQL, "mysql" for MySQL,
+ "redis" for Caching, "mongodb" for MongoDB, "kafka" for Kafka, "opensearch"
+ for OpenSearch, and "valkey" for Valkey. Required. Known values are: "pg",
+ "mysql", "redis", "valkey", "mongodb", "kafka", and "opensearch".
+ "name": "str", # A unique, human-readable name referring to a
+ database cluster. Required.
+ "num_nodes": 0, # The number of nodes in the database cluster.
+ Required.
+ "region": "str", # The slug identifier for the region where the
+ database cluster is located. Required.
+ "size": "str", # The slug identifier representing the size of the
+ nodes in the database cluster. Required.
+ "connection": {
+ "database": "str", # Optional. The name of the default
+ database.
+ "host": "str", # Optional. The FQDN pointing to the database
+ cluster's current primary node.
+ "password": "str", # Optional. The randomly generated
+ password for the default user.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ "port": 0, # Optional. The port on which the database
+ cluster is listening.
+ "ssl": bool, # Optional. A boolean value indicating if the
+ connection should be made over SSL.
+ "uri": "str", # Optional. A connection string in the format
+ accepted by the ``psql`` command. This is provided as a convenience and
+ should be able to be constructed by the other attributes.
+ "user": "str" # Optional. The default user for the
+ database.:code:`
`:code:`
`Requires ``database:view_credentials``
+ scope.
+ },
+ "created_at": "2020-02-20 00:00:00", # Optional. A time value given
+ in ISO8601 combined date and time format that represents when the database
+ cluster was created.
+ "db_names": [
+ "str" # Optional. An array of strings containing the names
+ of databases created in the database cluster.
+ ],
+ "do_settings": {
+ "service_cnames": [
+ "str" # Optional. An array of custom CNAMEs for the
+ database cluster. Each CNAME must be a valid RFC 1123 hostname (e.g.,
+ "db.example.com"). Maximum of 16 CNAMEs allowed, each up to 253
+ characters.
+ ]
+ },
+ "id": "str", # Optional. A unique ID that can be used to identify
+ and reference a database cluster.
+ "maintenance_window": {
+ "day": "str", # The day of the week on which to apply
+ maintenance updates. Required.
+ "hour": "str", # The hour in UTC at which maintenance
+ updates will be applied in 24 hour format. Required.
+ "description": [
+ "str" # Optional. A list of strings, each containing
+ information about a pending maintenance update.
],
- "ui_connection": {
- "host": "str", # Optional. The FQDN pointing to the
- opensearch cluster's current primary node.
- "password": "str", # Optional. The randomly
- generated password for the default
- user.:code:`
`:code:`
`Requires ``database:view_credentials``
- scope.
- "port": 0, # Optional. The port on which the
- opensearch dashboard is listening.
- "ssl": bool, # Optional. A boolean value indicating
- if the connection should be made over SSL.
- "uri": "str", # Optional. This is provided as a
- convenience and should be able to be constructed by the other
- attributes.
- "user": "str" # Optional. The default user for the
- opensearch dashboard.:code:`
`:code:`
`Requires
+ "pending": bool # Optional. A boolean value indicating
+ whether any maintenance is scheduled to be performed in the next window.
+ },
+ "metrics_endpoints": [
+ {
+ "host": "str", # Optional. A FQDN pointing to the
+ database cluster's node(s).
+ "port": 0 # Optional. The port on which a service is
+ listening.
+ }
+ ],
+ "private_connection": {
+ "database": "str", # Optional. The name of the default
+ database.
+ "host": "str", # Optional. The FQDN pointing to the database
+ cluster's current primary node.
+ "password": "str", # Optional. The randomly generated
+ password for the default user.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ "port": 0, # Optional. The port on which the database
+ cluster is listening.
+ "ssl": bool, # Optional. A boolean value indicating if the
+ connection should be made over SSL.
+ "uri": "str", # Optional. A connection string in the format
+ accepted by the ``psql`` command. This is provided as a convenience and
+ should be able to be constructed by the other attributes.
+ "user": "str" # Optional. The default user for the
+ database.:code:`
`:code:`
`Requires ``database:view_credentials``
+ scope.
+ },
+ "private_network_uuid": "str", # Optional. A string specifying the
+ UUID of the VPC to which the database cluster will be assigned. If excluded,
+ the cluster when creating a new database cluster, it will be assigned to your
+ account's default VPC for the region. :code:`
`:code:`
`Requires
+ ``vpc:read`` scope.
+ "project_id": "str", # Optional. The ID of the project that the
+ database cluster is assigned to. If excluded when creating a new database
+ cluster, it will be assigned to your default
+ project.:code:`
`:code:`
`Requires ``project:read`` scope.
+ "rules": [
+ {
+ "type": "str", # The type of resource that the
+ firewall rule allows to access the database cluster. Required. Known
+ values are: "droplet", "k8s", "ip_addr", "tag", and "app".
+ "value": "str", # The ID of the specific resource,
+ the name of a tag applied to a group of resources, or the IP address
+ that the firewall rule allows to access the database cluster.
+ Required.
+ "cluster_uuid": "str", # Optional. A unique ID for
+ the database cluster to which the rule is applied.
+ "created_at": "2020-02-20 00:00:00", # Optional. A
+ time value given in ISO8601 combined date and time format that
+ represents when the firewall rule was created.
+ "description": "str", # Optional. A human-readable
+ description of the rule.
+ "uuid": "str" # Optional. A unique ID for the
+ firewall rule itself.
+ }
+ ],
+ "schema_registry_connection": {
+ "host": "str", # Optional. The FQDN pointing to the schema
+ registry connection uri.
+ "password": "str", # Optional. The randomly generated
+ password for the schema registry.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ "port": 0, # Optional. The port on which the schema registry
+ is listening.
+ "ssl": bool, # Optional. A boolean value indicating if the
+ connection should be made over SSL.
+ "uri": "str", # Optional. This is provided as a convenience
+ and should be able to be constructed by the other attributes.
+ "user": "str" # Optional. The default user for the schema
+ registry.:code:`
`:code:`
`Requires ``database:view_credentials``
+ scope.
+ },
+ "semantic_version": "str", # Optional. A string representing the
+ semantic version of the database engine in use for the cluster.
+ "standby_connection": {
+ "database": "str", # Optional. The name of the default
+ database.
+ "host": "str", # Optional. The FQDN pointing to the database
+ cluster's current primary node.
+ "password": "str", # Optional. The randomly generated
+ password for the default user.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ "port": 0, # Optional. The port on which the database
+ cluster is listening.
+ "ssl": bool, # Optional. A boolean value indicating if the
+ connection should be made over SSL.
+ "uri": "str", # Optional. A connection string in the format
+ accepted by the ``psql`` command. This is provided as a convenience and
+ should be able to be constructed by the other attributes.
+ "user": "str" # Optional. The default user for the
+ database.:code:`
`:code:`
`Requires ``database:view_credentials``
+ scope.
+ },
+ "standby_private_connection": {
+ "database": "str", # Optional. The name of the default
+ database.
+ "host": "str", # Optional. The FQDN pointing to the database
+ cluster's current primary node.
+ "password": "str", # Optional. The randomly generated
+ password for the default user.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ "port": 0, # Optional. The port on which the database
+ cluster is listening.
+ "ssl": bool, # Optional. A boolean value indicating if the
+ connection should be made over SSL.
+ "uri": "str", # Optional. A connection string in the format
+ accepted by the ``psql`` command. This is provided as a convenience and
+ should be able to be constructed by the other attributes.
+ "user": "str" # Optional. The default user for the
+ database.:code:`
`:code:`
`Requires ``database:view_credentials``
+ scope.
+ },
+ "status": "str", # Optional. A string representing the current
+ status of the database cluster. Known values are: "creating", "online",
+ "resizing", "migrating", and "forking".
+ "storage_size_mib": 0, # Optional. Additional storage added to the
+ cluster, in MiB. If null, no additional storage is added to the cluster,
+ beyond what is provided as a base amount from the 'size' and any previously
+ added additional storage.
+ "tags": [
+ "str" # Optional. An array of tags that have been applied to
+ the database cluster. :code:`
`:code:`
`Requires ``tag:read``
+ scope.
+ ],
+ "ui_connection": {
+ "host": "str", # Optional. The FQDN pointing to the
+ opensearch cluster's current primary node.
+ "password": "str", # Optional. The randomly generated
+ password for the default user.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ "port": 0, # Optional. The port on which the opensearch
+ dashboard is listening.
+ "ssl": bool, # Optional. A boolean value indicating if the
+ connection should be made over SSL.
+ "uri": "str", # Optional. This is provided as a convenience
+ and should be able to be constructed by the other attributes.
+ "user": "str" # Optional. The default user for the
+ opensearch dashboard.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ },
+ "users": [
+ {
+ "name": "str", # The name of a database user.
+ Required.
+ "access_cert": "str", # Optional. Access certificate
+ for TLS client authentication. (Kafka only).
+ "access_key": "str", # Optional. Access key for TLS
+ client authentication. (Kafka only).
+ "mysql_settings": {
+ "auth_plugin": "str" # A string specifying
+ the authentication method to be used for connections to the MySQL
+ user account. The valid values are ``mysql_native_password`` or
+ ``caching_sha2_password``. If excluded when creating a new user,
+ the default for the version of MySQL in use will be used. As of
+ MySQL 8.0, the default is ``caching_sha2_password``. Required.
+ Known values are: "mysql_native_password" and
+ "caching_sha2_password".
+ },
+ "password": "str", # Optional. A randomly generated
+ password for the database user.:code:`
`Requires
``database:view_credentials`` scope.
- },
- "users": [
- {
- "name": "str", # The name of a database
- user. Required.
- "access_cert": "str", # Optional. Access
- certificate for TLS client authentication. (Kafka only).
- "access_key": "str", # Optional. Access key
- for TLS client authentication. (Kafka only).
- "mysql_settings": {
- "auth_plugin": "str" # A string
- specifying the authentication method to be used for
- connections to the MySQL user account. The valid values are
- ``mysql_native_password`` or ``caching_sha2_password``. If
- excluded when creating a new user, the default for the
- version of MySQL in use will be used. As of MySQL 8.0, the
- default is ``caching_sha2_password``. Required. Known values
- are: "mysql_native_password" and "caching_sha2_password".
- },
- "password": "str", # Optional. A randomly
- generated password for the database user.:code:`
`Requires
- ``database:view_credentials`` scope.
- "role": "str", # Optional. A string
- representing the database user's role. The value will be either
- "primary" or "normal". Known values are: "primary" and "normal".
- "settings": {
- "acl": [
- {
- "permission": "str",
- # Permission set applied to the ACL. 'consume' allows
- for messages to be consumed from the topic. 'produce'
- allows for messages to be published to the topic.
- 'produceconsume' allows for both 'consume' and
- 'produce' permission. 'admin' allows for
- 'produceconsume' as well as any operations to
- administer the topic (delete, update). Required.
- Known values are: "admin", "consume", "produce", and
- "produceconsume".
- "topic": "str", # A
- regex for matching the topic(s) that this ACL should
- apply to. Required.
- "id": "str" #
- Optional. An identifier for the ACL. Will be computed
- after the ACL is created/updated.
- }
- ],
- "mongo_user_settings": {
- "databases": [
- "str" # Optional. A
- list of databases to which the user should have
- access. When the database is set to ``admin``"" , the
- user will have access to all databases based on the
- user's role i.e. a user with the role ``readOnly``
- assigned to the ``admin`` database will have read
- access to all databases.
- ],
- "role": "str" # Optional.
- The role to assign to the user with each role mapping to
- a MongoDB built-in role. ``readOnly`` maps to a `read
- `_
- role. ``readWrite`` maps to a `readWrite
- `_
- role. ``dbAdmin`` maps to a `dbAdmin
- `_
- role. Known values are: "readOnly", "readWrite", and
- "dbAdmin".
- },
- "opensearch_acl": [
- {
- "index": "str", #
- Optional. A regex for matching the indexes that this
- ACL should apply to.
- "permission": "str"
- # Optional. Permission set applied to the ACL. 'read'
- allows user to read from the index. 'write' allows
- for user to write to the index. 'readwrite' allows
- for both 'read' and 'write' permission.
- 'deny'(default) restricts user from performing any
- operation over an index. 'admin' allows for
- 'readwrite' as well as any operations to administer
- the index. Known values are: "deny", "admin", "read",
- "readwrite", and "write".
- }
+ "role": "str", # Optional. A string representing the
+ database user's role. The value will be either "primary" or "normal".
+ Known values are: "primary" and "normal".
+ "settings": {
+ "acl": [
+ {
+ "permission": "str", #
+ Permission set applied to the ACL. 'consume' allows for
+ messages to be consumed from the topic. 'produce' allows
+ for messages to be published to the topic.
+ 'produceconsume' allows for both 'consume' and 'produce'
+ permission. 'admin' allows for 'produceconsume' as well
+ as any operations to administer the topic (delete,
+ update). Required. Known values are: "admin", "consume",
+ "produce", and "produceconsume".
+ "topic": "str", # A regex
+ for matching the topic(s) that this ACL should apply to.
+ Required.
+ "id": "str" # Optional. An
+ identifier for the ACL. Will be computed after the ACL is
+ created/updated.
+ }
+ ],
+ "mongo_user_settings": {
+ "databases": [
+ "str" # Optional. A list of
+ databases to which the user should have access. When the
+ database is set to ``admin``"" , the user will have
+ access to all databases based on the user's role i.e. a
+ user with the role ``readOnly`` assigned to the ``admin``
+ database will have read access to all databases.
],
- "pg_allow_replication": bool #
- Optional. For Postgres clusters, set to ``true`` for a user
- with replication rights. This option is not currently
- supported for other database engines.
- }
+ "role": "str" # Optional. The role
+ to assign to the user with each role mapping to a MongoDB
+ built-in role. ``readOnly`` maps to a `read
+ `_
+ role. ``readWrite`` maps to a `readWrite
+ `_
+ role. ``dbAdmin`` maps to a `dbAdmin
+ `_
+ role. Known values are: "readOnly", "readWrite", and
+ "dbAdmin".
+ },
+ "opensearch_acl": [
+ {
+ "index": "str", # Optional.
+ A regex for matching the indexes that this ACL should
+ apply to.
+ "permission": "str" #
+ Optional. Permission set applied to the ACL. 'read'
+ allows user to read from the index. 'write' allows for
+ user to write to the index. 'readwrite' allows for both
+ 'read' and 'write' permission. 'deny'(default) restricts
+ user from performing any operation over an index. 'admin'
+ allows for 'readwrite' as well as any operations to
+ administer the index. Known values are: "deny", "admin",
+ "read", "readwrite", and "write".
+ }
+ ],
+ "pg_allow_replication": bool # Optional. For
+ Postgres clusters, set to ``true`` for a user with replication
+ rights. This option is not currently supported for other database
+ engines.
}
- ],
- "version": "str", # Optional. A string representing the
- version of the database engine in use for the cluster.
- "version_end_of_availability": "str", # Optional. A
- timestamp referring to the date when the particular version will no
- longer be available for creating new clusters. If null, the version does
- not have an end of availability timeline.
- "version_end_of_life": "str" # Optional. A timestamp
- referring to the date when the particular version will no longer be
- supported. If null, the version does not have an end of life timeline.
- }
- ]
+ }
+ ],
+ "version": "str", # Optional. A string representing the version of
+ the database engine in use for the cluster.
+ "version_end_of_availability": "str", # Optional. A timestamp
+ referring to the date when the particular version will no longer be available
+ for creating new clusters. If null, the version does not have an end of
+ availability timeline.
+ "version_end_of_life": "str" # Optional. A timestamp referring to
+ the date when the particular version will no longer be supported. If null,
+ the version does not have an end of life timeline.
+ }
}
# response body for status code(s): 404
response == {
@@ -105196,88 +111630,9 @@ async def list_clusters(
tickets to help identify the issue.
}
"""
- error_map: MutableMapping[int, Type[HttpResponseError]] = {
- 404: ResourceNotFoundError,
- 409: ResourceExistsError,
- 304: ResourceNotModifiedError,
- 401: cast(
- Type[HttpResponseError],
- lambda response: ClientAuthenticationError(response=response),
- ),
- 429: HttpResponseError,
- 500: HttpResponseError,
- }
- error_map.update(kwargs.pop("error_map", {}) or {})
-
- _headers = kwargs.pop("headers", {}) or {}
- _params = kwargs.pop("params", {}) or {}
-
- cls: ClsType[JSON] = kwargs.pop("cls", None)
-
- _request = build_databases_list_clusters_request(
- tag_name=tag_name,
- headers=_headers,
- params=_params,
- )
- _request.url = self._client.format_url(_request.url)
-
- _stream = False
- pipeline_response: PipelineResponse = (
- await self._client._pipeline.run( # pylint: disable=protected-access
- _request, stream=_stream, **kwargs
- )
- )
-
- response = pipeline_response.http_response
-
- if response.status_code not in [200, 404]:
- if _stream:
- await response.read() # Load the body in memory and close the socket
- map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
- raise HttpResponseError(response=response)
-
- response_headers = {}
- if response.status_code == 200:
- response_headers["ratelimit-limit"] = self._deserialize(
- "int", response.headers.get("ratelimit-limit")
- )
- response_headers["ratelimit-remaining"] = self._deserialize(
- "int", response.headers.get("ratelimit-remaining")
- )
- response_headers["ratelimit-reset"] = self._deserialize(
- "int", response.headers.get("ratelimit-reset")
- )
-
- if response.content:
- deserialized = response.json()
- else:
- deserialized = None
-
- if response.status_code == 404:
- response_headers["ratelimit-limit"] = self._deserialize(
- "int", response.headers.get("ratelimit-limit")
- )
- response_headers["ratelimit-remaining"] = self._deserialize(
- "int", response.headers.get("ratelimit-remaining")
- )
- response_headers["ratelimit-reset"] = self._deserialize(
- "int", response.headers.get("ratelimit-reset")
- )
-
- if response.content:
- deserialized = response.json()
- else:
- deserialized = None
-
- if cls:
- return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
-
- return cast(JSON, deserialized) # type: ignore
- @overload
- async def create_cluster(
- self, body: JSON, *, content_type: str = "application/json", **kwargs: Any
- ) -> JSON:
+ @distributed_trace_async
+ async def create_cluster(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON:
# pylint: disable=line-too-long
"""Create a New Database Cluster.
@@ -105306,11 +111661,8 @@ async def create_cluster(
Note: Caching cluster creates are no longer supported as of 2025-04-30T00:00:00Z. Backups are
also not supported for Caching or Valkey clusters.
- :param body: Required.
- :type body: JSON
- :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
- Default value is "application/json".
- :paramtype content_type: str
+ :param body: Is either a JSON type or a IO[bytes] type. Required.
+ :type body: JSON or IO[bytes]
:return: JSON object
:rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
@@ -105937,44 +112289,118 @@ async def create_cluster(
tickets to help identify the issue.
}
"""
+ error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ 401: cast(
+ Type[HttpResponseError],
+ lambda response: ClientAuthenticationError(response=response),
+ ),
+ 429: HttpResponseError,
+ 500: HttpResponseError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
- @overload
- async def create_cluster(
- self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
- ) -> JSON:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = kwargs.pop("params", {}) or {}
+
+ content_type: Optional[str] = kwargs.pop(
+ "content_type", _headers.pop("Content-Type", None)
+ )
+ cls: ClsType[JSON] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = body
+
+ _request = build_databases_create_cluster_request(
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = (
+ await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [201, 404]:
+ if _stream:
+ await response.read() # Load the body in memory and close the socket
+ map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
+ raise HttpResponseError(response=response)
+
+ response_headers = {}
+ if response.status_code == 201:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if response.status_code == 404:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if cls:
+ return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
+
+ return cast(JSON, deserialized) # type: ignore
+
+ @distributed_trace_async
+ async def get_cluster(self, database_cluster_uuid: str, **kwargs: Any) -> JSON:
# pylint: disable=line-too-long
- """Create a New Database Cluster.
+ """Retrieve an Existing Database Cluster.
- To create a database cluster, send a POST request to ``/v2/databases``. To see a list of
- options for each engine, such as available regions, size slugs, and versions, send a GET
- request to the ``/v2/databases/options`` endpoint. The available sizes for the
- ``storage_size_mib`` field depends on the cluster's size. To see a list of available sizes, see
- `Managed Database Pricing `_.
+ To show information about an existing database cluster, send a GET request to
+ ``/v2/databases/$DATABASE_ID``.
- The create response returns a JSON object with a key called ``database``. The value of this is
- an object that contains the standard attributes associated with a database cluster. The initial
- value of the database cluster's ``status`` attribute is ``creating``. When the cluster is ready
- to receive traffic, this changes to ``online``.
+ The response will be a JSON object with a database key. This will be set to an object
+ containing the standard database cluster attributes.
- The embedded ``connection`` and ``private_connection`` objects contains the information needed
- to access the database cluster. For multi-node clusters, the ``standby_connection`` and
+ The embedded ``connection`` and ``private_connection`` objects will contain the information
+ needed to access the database cluster. For multi-node clusters, the ``standby_connection`` and
``standby_private_connection`` objects contain the information needed to connect to the
cluster's standby node(s).
- DigitalOcean managed PostgreSQL and MySQL database clusters take automated daily backups. To
- create a new database cluster based on a backup of an existing cluster, send a POST request to
- ``/v2/databases``. In addition to the standard database cluster attributes, the JSON body must
- include a key named ``backup_restore`` with the name of the original database cluster and the
- timestamp of the backup to be restored. Creating a database from a backup is the same as
- forking a database in the control panel.
- Note: Caching cluster creates are no longer supported as of 2025-04-30T00:00:00Z. Backups are
- also not supported for Caching or Valkey clusters.
+ The embedded maintenance_window object will contain information about any scheduled maintenance
+ for the database cluster.
- :param body: Required.
- :type body: IO[bytes]
- :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
- Default value is "application/json".
- :paramtype content_type: str
+ :param database_cluster_uuid: A unique identifier for a database cluster. Required.
+ :type database_cluster_uuid: str
:return: JSON object
:rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
@@ -105982,7 +112408,7 @@ async def create_cluster(
Example:
.. code-block:: python
- # response body for status code(s): 201
+ # response body for status code(s): 200
response == {
"database": {
"engine": "str", # A slug representing the database engine used for
@@ -106296,37 +112722,915 @@ async def create_cluster(
tickets to help identify the issue.
}
"""
+ error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ 401: cast(
+ Type[HttpResponseError],
+ lambda response: ClientAuthenticationError(response=response),
+ ),
+ 429: HttpResponseError,
+ 500: HttpResponseError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = kwargs.pop("params", {}) or {}
+
+ cls: ClsType[JSON] = kwargs.pop("cls", None)
+
+ _request = build_databases_get_cluster_request(
+ database_cluster_uuid=database_cluster_uuid,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = (
+ await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 404]:
+ if _stream:
+ await response.read() # Load the body in memory and close the socket
+ map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
+ raise HttpResponseError(response=response)
+
+ response_headers = {}
+ if response.status_code == 200:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if response.status_code == 404:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if cls:
+ return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
+
+ return cast(JSON, deserialized) # type: ignore
+
+ @distributed_trace_async
+ async def destroy_cluster(
+ self, database_cluster_uuid: str, **kwargs: Any
+ ) -> Optional[JSON]:
+ # pylint: disable=line-too-long
+ """Destroy a Database Cluster.
+
+ To destroy a specific database, send a DELETE request to ``/v2/databases/$DATABASE_ID``.
+ A status of 204 will be given. This indicates that the request was processed successfully, but
+ that no response body is needed.
+
+ :param database_cluster_uuid: A unique identifier for a database cluster. Required.
+ :type database_cluster_uuid: str
+ :return: JSON object or None
+ :rtype: JSON or None
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # response body for status code(s): 404
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
+ """
+ error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ 401: cast(
+ Type[HttpResponseError],
+ lambda response: ClientAuthenticationError(response=response),
+ ),
+ 429: HttpResponseError,
+ 500: HttpResponseError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = kwargs.pop("params", {}) or {}
+
+ cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None)
+
+ _request = build_databases_destroy_cluster_request(
+ database_cluster_uuid=database_cluster_uuid,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = (
+ await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [204, 404]:
+ if _stream:
+ await response.read() # Load the body in memory and close the socket
+ map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
+ raise HttpResponseError(response=response)
+
+ deserialized = None
+ response_headers = {}
+ if response.status_code == 204:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.status_code == 404:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @distributed_trace_async
+ async def get_config(self, database_cluster_uuid: str, **kwargs: Any) -> JSON:
+ # pylint: disable=line-too-long
+ """Retrieve an Existing Database Cluster Configuration.
+
+ Shows configuration parameters for an existing database cluster by sending a GET request to
+ ``/v2/databases/$DATABASE_ID/config``.
+ The response is a JSON object with a ``config`` key, which is set to an object
+ containing any database configuration parameters.
+
+ :param database_cluster_uuid: A unique identifier for a database cluster. Required.
+ :type database_cluster_uuid: str
+ :return: JSON object
+ :rtype: JSON
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # response body for status code(s): 200
+ response == {
+ "config": {}
+ }
+ # response body for status code(s): 404
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
+ """
+ error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ 401: cast(
+ Type[HttpResponseError],
+ lambda response: ClientAuthenticationError(response=response),
+ ),
+ 429: HttpResponseError,
+ 500: HttpResponseError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = kwargs.pop("params", {}) or {}
+
+ cls: ClsType[JSON] = kwargs.pop("cls", None)
+
+ _request = build_databases_get_config_request(
+ database_cluster_uuid=database_cluster_uuid,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = (
+ await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 404]:
+ if _stream:
+ await response.read() # Load the body in memory and close the socket
+ map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
+ raise HttpResponseError(response=response)
+
+ response_headers = {}
+ if response.status_code == 200:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if response.status_code == 404:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if cls:
+ return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
+
+ return cast(JSON, deserialized) # type: ignore
+
+ @overload
+ async def patch_config(
+ self,
+ database_cluster_uuid: str,
+ body: JSON,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> Optional[JSON]:
+ # pylint: disable=line-too-long
+ """Update the Database Configuration for an Existing Database.
+
+ To update the configuration for an existing database cluster, send a PATCH request to
+ ``/v2/databases/$DATABASE_ID/config``.
+
+ :param database_cluster_uuid: A unique identifier for a database cluster. Required.
+ :type database_cluster_uuid: str
+ :param body: Required.
+ :type body: JSON
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: JSON object or None
+ :rtype: JSON or None
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # JSON input template you can fill out and use as your body input.
+ body = {
+ "config": {}
+ }
+
+ # response body for status code(s): 404
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
+ """
+
+ @overload
+ async def patch_config(
+ self,
+ database_cluster_uuid: str,
+ body: IO[bytes],
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> Optional[JSON]:
+ # pylint: disable=line-too-long
+ """Update the Database Configuration for an Existing Database.
+
+ To update the configuration for an existing database cluster, send a PATCH request to
+ ``/v2/databases/$DATABASE_ID/config``.
+
+ :param database_cluster_uuid: A unique identifier for a database cluster. Required.
+ :type database_cluster_uuid: str
+ :param body: Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: JSON object or None
+ :rtype: JSON or None
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # response body for status code(s): 404
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
+ """
+
+ @distributed_trace_async
+ async def patch_config(
+ self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any
+ ) -> Optional[JSON]:
+ # pylint: disable=line-too-long
+ """Update the Database Configuration for an Existing Database.
+
+ To update the configuration for an existing database cluster, send a PATCH request to
+ ``/v2/databases/$DATABASE_ID/config``.
+
+ :param database_cluster_uuid: A unique identifier for a database cluster. Required.
+ :type database_cluster_uuid: str
+ :param body: Is either a JSON type or a IO[bytes] type. Required.
+ :type body: JSON or IO[bytes]
+ :return: JSON object or None
+ :rtype: JSON or None
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # JSON input template you can fill out and use as your body input.
+ body = {
+ "config": {}
+ }
+
+ # response body for status code(s): 404
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
+ """
+ error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ 401: cast(
+ Type[HttpResponseError],
+ lambda response: ClientAuthenticationError(response=response),
+ ),
+ 429: HttpResponseError,
+ 500: HttpResponseError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = kwargs.pop("params", {}) or {}
+
+ content_type: Optional[str] = kwargs.pop(
+ "content_type", _headers.pop("Content-Type", None)
+ )
+ cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = body
+
+ _request = build_databases_patch_config_request(
+ database_cluster_uuid=database_cluster_uuid,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = (
+ await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 404]:
+ if _stream:
+ await response.read() # Load the body in memory and close the socket
+ map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
+ raise HttpResponseError(response=response)
+
+ deserialized = None
+ response_headers = {}
+ if response.status_code == 200:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.status_code == 404:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @distributed_trace_async
+ async def get_ca(self, database_cluster_uuid: str, **kwargs: Any) -> JSON:
+ # pylint: disable=line-too-long
+ """Retrieve the Public Certificate.
+
+ To retrieve the public certificate used to secure the connection to the database cluster, send a
+ GET request to
+ ``/v2/databases/$DATABASE_ID/ca``.
+
+ The response will be a JSON object with a ``ca`` key. This will be set to an object
+ containing the base64 encoding of the public key certificate.
+
+ :param database_cluster_uuid: A unique identifier for a database cluster. Required.
+ :type database_cluster_uuid: str
+ :return: JSON object
+ :rtype: JSON
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # response body for status code(s): 200
+ response == {
+ "ca": {
+ "certificate": "str" # base64 encoding of the certificate used to
+ secure database connections. Required.
+ }
+ }
+ # response body for status code(s): 404
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
+ """
+ error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ 401: cast(
+ Type[HttpResponseError],
+ lambda response: ClientAuthenticationError(response=response),
+ ),
+ 429: HttpResponseError,
+ 500: HttpResponseError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = kwargs.pop("params", {}) or {}
+
+ cls: ClsType[JSON] = kwargs.pop("cls", None)
+
+ _request = build_databases_get_ca_request(
+ database_cluster_uuid=database_cluster_uuid,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = (
+ await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 404]:
+ if _stream:
+ await response.read() # Load the body in memory and close the socket
+ map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
+ raise HttpResponseError(response=response)
+
+ response_headers = {}
+ if response.status_code == 200:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if response.status_code == 404:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if cls:
+ return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
+
+ return cast(JSON, deserialized) # type: ignore
+
+ @distributed_trace_async
+ async def get_migration_status(
+ self, database_cluster_uuid: str, **kwargs: Any
+ ) -> JSON:
+ # pylint: disable=line-too-long
+ """Retrieve the Status of an Online Migration.
+
+ To retrieve the status of the most recent online migration, send a GET request to
+ ``/v2/databases/$DATABASE_ID/online-migration``.
+
+ :param database_cluster_uuid: A unique identifier for a database cluster. Required.
+ :type database_cluster_uuid: str
+ :return: JSON object
+ :rtype: JSON
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # response body for status code(s): 200
+ response == {
+ "created_at": "str", # Optional. The time the migration was initiated, in
+ ISO 8601 format.
+ "id": "str", # Optional. The ID of the most recent migration.
+ "status": "str" # Optional. The current status of the migration. Known
+ values are: "running", "syncing", "canceled", "error", and "done".
+ }
+ # response body for status code(s): 404
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
+ """
+ error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ 401: cast(
+ Type[HttpResponseError],
+ lambda response: ClientAuthenticationError(response=response),
+ ),
+ 429: HttpResponseError,
+ 500: HttpResponseError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = kwargs.pop("params", {}) or {}
+
+ cls: ClsType[JSON] = kwargs.pop("cls", None)
+
+ _request = build_databases_get_migration_status_request(
+ database_cluster_uuid=database_cluster_uuid,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = (
+ await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 404]:
+ if _stream:
+ await response.read() # Load the body in memory and close the socket
+ map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
+ raise HttpResponseError(response=response)
+
+ response_headers = {}
+ if response.status_code == 200:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if response.status_code == 404:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if cls:
+ return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
+
+ return cast(JSON, deserialized) # type: ignore
+
+ @overload
+ async def update_online_migration(
+ self,
+ database_cluster_uuid: str,
+ body: JSON,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> JSON:
+ # pylint: disable=line-too-long
+ """Start an Online Migration.
+
+ To start an online migration, send a PUT request to
+ ``/v2/databases/$DATABASE_ID/online-migration`` endpoint. Migrating a cluster establishes a
+ connection with an existing cluster and replicates its contents to the target cluster. Online
+ migration is only available for MySQL, PostgreSQL, Caching, and Valkey clusters.
+ If the existing database is continuously being written to, the migration process will continue
+ for up to two weeks unless it is manually stopped. Online migration is only available for
+ `MySQL
+ <https://docs.digitalocean.com/products/databases/mysql/how-to/migrate/>`_\\
+ , `PostgreSQL
+ <https://docs.digitalocean.com/products/databases/postgresql/how-to/migrate/>`_\\ , `Caching
+ <https://docs.digitalocean.com/products/databases/redis/how-to/migrate/>`_\\ , and `Valkey
+ <https://docs.digitalocean.com/products/databases/valkey/how-to/migrate/>`_ clusters.
+
+ :param database_cluster_uuid: A unique identifier for a database cluster. Required.
+ :type database_cluster_uuid: str
+ :param body: Required.
+ :type body: JSON
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: JSON object
+ :rtype: JSON
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # JSON input template you can fill out and use as your body input.
+ body = {
+ "source": {
+ "dbname": "str", # Optional. The name of the default database.
+ "host": "str", # Optional. The FQDN pointing to the database
+ cluster's current primary node.
+ "password": "str", # Optional. The randomly generated password for
+ the default user.
+ "port": 0, # Optional. The port on which the database cluster is
+ listening.
+ "username": "str" # Optional. The default user for the database.
+ },
+ "disable_ssl": bool, # Optional. Enables SSL encryption when connecting to
+ the source database.
+ "ignore_dbs": [
+ "str" # Optional. List of databases that should be ignored during
+ migration.
+ ]
+ }
+
+ # response body for status code(s): 200
+ response == {
+ "created_at": "str", # Optional. The time the migration was initiated, in
+ ISO 8601 format.
+ "id": "str", # Optional. The ID of the most recent migration.
+ "status": "str" # Optional. The current status of the migration. Known
+ values are: "running", "syncing", "canceled", "error", and "done".
+ }
+ # response body for status code(s): 404
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
+ """
- @distributed_trace_async
- async def create_cluster(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON:
+ @overload
+ async def update_online_migration(
+ self,
+ database_cluster_uuid: str,
+ body: IO[bytes],
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> JSON:
# pylint: disable=line-too-long
- """Create a New Database Cluster.
+ """Start an Online Migration.
- To create a database cluster, send a POST request to ``/v2/databases``. To see a list of
- options for each engine, such as available regions, size slugs, and versions, send a GET
- request to the ``/v2/databases/options`` endpoint. The available sizes for the
- ``storage_size_mib`` field depends on the cluster's size. To see a list of available sizes, see
- `Managed Database Pricing `_.
+ To start an online migration, send a PUT request to
+ ``/v2/databases/$DATABASE_ID/online-migration`` endpoint. Migrating a cluster establishes a
+ connection with an existing cluster and replicates its contents to the target cluster. Online
+ migration is only available for MySQL, PostgreSQL, Caching, and Valkey clusters.
+ If the existing database is continuously being written to, the migration process will continue
+ for up to two weeks unless it is manually stopped. Online migration is only available for
+ `MySQL
+ <https://docs.digitalocean.com/products/databases/mysql/how-to/migrate/>`_\\
+ , `PostgreSQL
+ <https://docs.digitalocean.com/products/databases/postgresql/how-to/migrate/>`_\\ , `Caching
+ <https://docs.digitalocean.com/products/databases/redis/how-to/migrate/>`_\\ , and `Valkey
+ <https://docs.digitalocean.com/products/databases/valkey/how-to/migrate/>`_ clusters.
- The create response returns a JSON object with a key called ``database``. The value of this is
- an object that contains the standard attributes associated with a database cluster. The initial
- value of the database cluster's ``status`` attribute is ``creating``. When the cluster is ready
- to receive traffic, this changes to ``online``.
+ :param database_cluster_uuid: A unique identifier for a database cluster. Required.
+ :type database_cluster_uuid: str
+ :param body: Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: JSON object
+ :rtype: JSON
+ :raises ~azure.core.exceptions.HttpResponseError:
- The embedded ``connection`` and ``private_connection`` objects contains the information needed
- to access the database cluster. For multi-node clusters, the ``standby_connection`` and
- ``standby_private_connection`` objects contain the information needed to connect to the
- cluster's standby node(s).
+ Example:
+ .. code-block:: python
- DigitalOcean managed PostgreSQL and MySQL database clusters take automated daily backups. To
- create a new database cluster based on a backup of an existing cluster, send a POST request to
- ``/v2/databases``. In addition to the standard database cluster attributes, the JSON body must
- include a key named ``backup_restore`` with the name of the original database cluster and the
- timestamp of the backup to be restored. Creating a database from a backup is the same as
- forking a database in the control panel.
- Note: Caching cluster creates are no longer supported as of 2025-04-30T00:00:00Z. Backups are
- also not supported for Caching or Valkey clusters.
+ # response body for status code(s): 200
+ response == {
+ "created_at": "str", # Optional. The time the migration was initiated, in
+ ISO 8601 format.
+ "id": "str", # Optional. The ID of the most recent migration.
+ "status": "str" # Optional. The current status of the migration. Known
+ values are: "running", "syncing", "canceled", "error", and "done".
+ }
+ # response body for status code(s): 404
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
+ """
+
+ @distributed_trace_async
+ async def update_online_migration(
+ self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any
+ ) -> JSON:
+ # pylint: disable=line-too-long
+ """Start an Online Migration.
+
+ To start an online migration, send a PUT request to
+ ``/v2/databases/$DATABASE_ID/online-migration`` endpoint. Migrating a cluster establishes a
+ connection with an existing cluster and replicates its contents to the target cluster. Online
+ migration is only available for MySQL, PostgreSQL, Caching, and Valkey clusters.
+ If the existing database is continuously being written to, the migration process will continue
+ for up to two weeks unless it is manually stopped. Online migration is only available for
+ `MySQL
+ <https://docs.digitalocean.com/products/databases/mysql/how-to/migrate/>`_\\
+ , `PostgreSQL
+ <https://docs.digitalocean.com/products/databases/postgresql/how-to/migrate/>`_\\ , `Caching
+ <https://docs.digitalocean.com/products/databases/redis/how-to/migrate/>`_\\ , and `Valkey
+ <https://docs.digitalocean.com/products/databases/valkey/how-to/migrate/>`_ clusters.
+ :param database_cluster_uuid: A unique identifier for a database cluster. Required.
+ :type database_cluster_uuid: str
:param body: Is either a JSON type or a IO[bytes] type. Required.
:type body: JSON or IO[bytes]
:return: JSON object
@@ -106338,611 +113642,382 @@ async def create_cluster(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> J
# JSON input template you can fill out and use as your body input.
body = {
- "engine": "str", # A slug representing the database engine used for the
- cluster. The possible values are: "pg" for PostgreSQL, "mysql" for MySQL, "redis"
- for Caching, "mongodb" for MongoDB, "kafka" for Kafka, "opensearch" for
- OpenSearch, and "valkey" for Valkey. Required. Known values are: "pg", "mysql",
- "redis", "valkey", "mongodb", "kafka", and "opensearch".
- "name": "str", # A unique, human-readable name referring to a database
- cluster. Required.
- "num_nodes": 0, # The number of nodes in the database cluster. Required.
- "region": "str", # The slug identifier for the region where the database
- cluster is located. Required.
- "size": "str", # The slug identifier representing the size of the nodes in
- the database cluster. Required.
- "autoscale": {
- "storage": {
- "enabled": bool, # Whether storage autoscaling is enabled
- for the cluster. Required.
- "increment_gib": 0, # Optional. The amount of additional
- storage to add (in GiB) when autoscaling is triggered.
- "threshold_percent": 0 # Optional. The storage usage
- threshold percentage that triggers autoscaling. When storage usage
- exceeds this percentage, additional storage will be added automatically.
- }
- },
- "backup_restore": {
- "database_name": "str", # The name of an existing database cluster
- from which the backup will be restored. Required.
- "backup_created_at": "2020-02-20 00:00:00" # Optional. The timestamp
- of an existing database cluster backup in ISO8601 combined date and time
- format. The most recent backup will be used if excluded.
- },
- "connection": {
- "database": "str", # Optional. The name of the default database.
- "host": "str", # Optional. The FQDN pointing to the database
- cluster's current primary node.
- "password": "str", # Optional. The randomly generated password for
- the default user.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- "port": 0, # Optional. The port on which the database cluster is
- listening.
- "ssl": bool, # Optional. A boolean value indicating if the
- connection should be made over SSL.
- "uri": "str", # Optional. A connection string in the format accepted
- by the ``psql`` command. This is provided as a convenience and should be able
- to be constructed by the other attributes.
- "user": "str" # Optional. The default user for the
- database.:code:`
`:code:`
`Requires ``database:view_credentials``
- scope.
- },
- "created_at": "2020-02-20 00:00:00", # Optional. A time value given in
- ISO8601 combined date and time format that represents when the database cluster
- was created.
- "db_names": [
- "str" # Optional. An array of strings containing the names of
- databases created in the database cluster.
- ],
- "do_settings": {
- "service_cnames": [
- "str" # Optional. An array of custom CNAMEs for the database
- cluster. Each CNAME must be a valid RFC 1123 hostname (e.g.,
- "db.example.com"). Maximum of 16 CNAMEs allowed, each up to 253
- characters.
- ]
- },
- "id": "str", # Optional. A unique ID that can be used to identify and
- reference a database cluster.
- "maintenance_window": {
- "day": "str", # The day of the week on which to apply maintenance
- updates. Required.
- "hour": "str", # The hour in UTC at which maintenance updates will
- be applied in 24 hour format. Required.
- "description": [
- "str" # Optional. A list of strings, each containing
- information about a pending maintenance update.
- ],
- "pending": bool # Optional. A boolean value indicating whether any
- maintenance is scheduled to be performed in the next window.
- },
- "metrics_endpoints": [
- {
- "host": "str", # Optional. A FQDN pointing to the database
- cluster's node(s).
- "port": 0 # Optional. The port on which a service is
- listening.
- }
- ],
- "private_connection": {
- "database": "str", # Optional. The name of the default database.
- "host": "str", # Optional. The FQDN pointing to the database
- cluster's current primary node.
- "password": "str", # Optional. The randomly generated password for
- the default user.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- "port": 0, # Optional. The port on which the database cluster is
- listening.
- "ssl": bool, # Optional. A boolean value indicating if the
- connection should be made over SSL.
- "uri": "str", # Optional. A connection string in the format accepted
- by the ``psql`` command. This is provided as a convenience and should be able
- to be constructed by the other attributes.
- "user": "str" # Optional. The default user for the
- database.:code:`
`:code:`
`Requires ``database:view_credentials``
- scope.
- },
- "private_network_uuid": "str", # Optional. A string specifying the UUID of
- the VPC to which the database cluster will be assigned. If excluded, the cluster
- when creating a new database cluster, it will be assigned to your account's
- default VPC for the region. :code:`
`:code:`
`Requires ``vpc:read`` scope.
- "project_id": "str", # Optional. The ID of the project that the database
- cluster is assigned to. If excluded when creating a new database cluster, it will
- be assigned to your default project.:code:`
`:code:`
`Requires
- ``project:update`` scope.
- "rules": [
- {
- "type": "str", # The type of resource that the firewall rule
- allows to access the database cluster. Required. Known values are:
- "droplet", "k8s", "ip_addr", "tag", and "app".
- "value": "str", # The ID of the specific resource, the name
- of a tag applied to a group of resources, or the IP address that the
- firewall rule allows to access the database cluster. Required.
- "cluster_uuid": "str", # Optional. A unique ID for the
- database cluster to which the rule is applied.
- "created_at": "2020-02-20 00:00:00", # Optional. A time
- value given in ISO8601 combined date and time format that represents when
- the firewall rule was created.
- "description": "str", # Optional. A human-readable
- description of the rule.
- "uuid": "str" # Optional. A unique ID for the firewall rule
- itself.
- }
- ],
- "schema_registry_connection": {
- "host": "str", # Optional. The FQDN pointing to the schema registry
- connection uri.
- "password": "str", # Optional. The randomly generated password for
- the schema registry.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- "port": 0, # Optional. The port on which the schema registry is
- listening.
- "ssl": bool, # Optional. A boolean value indicating if the
- connection should be made over SSL.
- "uri": "str", # Optional. This is provided as a convenience and
- should be able to be constructed by the other attributes.
- "user": "str" # Optional. The default user for the schema
- registry.:code:`
`:code:`
`Requires ``database:view_credentials``
- scope.
- },
- "semantic_version": "str", # Optional. A string representing the semantic
- version of the database engine in use for the cluster.
- "standby_connection": {
- "database": "str", # Optional. The name of the default database.
- "host": "str", # Optional. The FQDN pointing to the database
- cluster's current primary node.
- "password": "str", # Optional. The randomly generated password for
- the default user.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- "port": 0, # Optional. The port on which the database cluster is
- listening.
- "ssl": bool, # Optional. A boolean value indicating if the
- connection should be made over SSL.
- "uri": "str", # Optional. A connection string in the format accepted
- by the ``psql`` command. This is provided as a convenience and should be able
- to be constructed by the other attributes.
- "user": "str" # Optional. The default user for the
- database.:code:`
`:code:`
`Requires ``database:view_credentials``
- scope.
- },
- "standby_private_connection": {
- "database": "str", # Optional. The name of the default database.
+ "source": {
+ "dbname": "str", # Optional. The name of the default database.
"host": "str", # Optional. The FQDN pointing to the database
cluster's current primary node.
"password": "str", # Optional. The randomly generated password for
- the default user.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
+ the default user.
"port": 0, # Optional. The port on which the database cluster is
listening.
- "ssl": bool, # Optional. A boolean value indicating if the
- connection should be made over SSL.
- "uri": "str", # Optional. A connection string in the format accepted
- by the ``psql`` command. This is provided as a convenience and should be able
- to be constructed by the other attributes.
- "user": "str" # Optional. The default user for the
- database.:code:`
`:code:`
`Requires ``database:view_credentials``
- scope.
- },
- "status": "str", # Optional. A string representing the current status of the
- database cluster. Known values are: "creating", "online", "resizing",
- "migrating", and "forking".
- "storage_size_mib": 0, # Optional. Additional storage added to the cluster,
- in MiB. If null, no additional storage is added to the cluster, beyond what is
- provided as a base amount from the 'size' and any previously added additional
- storage.
- "tags": [
- "str" # Optional. An array of tags (as strings) to apply to the
- database cluster. :code:`
`:code:`
`Requires ``tag:create`` scope.
- ],
- "ui_connection": {
- "host": "str", # Optional. The FQDN pointing to the opensearch
- cluster's current primary node.
- "password": "str", # Optional. The randomly generated password for
- the default user.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- "port": 0, # Optional. The port on which the opensearch dashboard is
- listening.
- "ssl": bool, # Optional. A boolean value indicating if the
- connection should be made over SSL.
- "uri": "str", # Optional. This is provided as a convenience and
- should be able to be constructed by the other attributes.
- "user": "str" # Optional. The default user for the opensearch
- dashboard.:code:`
`:code:`
`Requires ``database:view_credentials``
- scope.
+ "username": "str" # Optional. The default user for the database.
},
- "users": [
- {
- "name": "str", # The name of a database user. Required.
- "access_cert": "str", # Optional. Access certificate for TLS
- client authentication. (Kafka only).
- "access_key": "str", # Optional. Access key for TLS client
- authentication. (Kafka only).
- "mysql_settings": {
- "auth_plugin": "str" # A string specifying the
- authentication method to be used for connections to the MySQL user
- account. The valid values are ``mysql_native_password`` or
- ``caching_sha2_password``. If excluded when creating a new user, the
- default for the version of MySQL in use will be used. As of MySQL
- 8.0, the default is ``caching_sha2_password``. Required. Known values
- are: "mysql_native_password" and "caching_sha2_password".
- },
- "password": "str", # Optional. A randomly generated password
- for the database user.:code:`
`Requires ``database:view_credentials``
- scope.
- "role": "str", # Optional. A string representing the
- database user's role. The value will be either "primary" or "normal".
- Known values are: "primary" and "normal".
- "settings": {
- "acl": [
- {
- "permission": "str", # Permission
- set applied to the ACL. 'consume' allows for messages to be
- consumed from the topic. 'produce' allows for messages to be
- published to the topic. 'produceconsume' allows for both
- 'consume' and 'produce' permission. 'admin' allows for
- 'produceconsume' as well as any operations to administer the
- topic (delete, update). Required. Known values are: "admin",
- "consume", "produce", and "produceconsume".
- "topic": "str", # A regex for
- matching the topic(s) that this ACL should apply to.
- Required.
- "id": "str" # Optional. An
- identifier for the ACL. Will be computed after the ACL is
- created/updated.
- }
- ],
- "mongo_user_settings": {
- "databases": [
- "str" # Optional. A list of
- databases to which the user should have access. When the
- database is set to ``admin``"" , the user will have access to
- all databases based on the user's role i.e. a user with the
- role ``readOnly`` assigned to the ``admin`` database will
- have read access to all databases.
- ],
- "role": "str" # Optional. The role to assign
- to the user with each role mapping to a MongoDB built-in role.
- ``readOnly`` maps to a `read
- `_
- role. ``readWrite`` maps to a `readWrite
- `_
- role. ``dbAdmin`` maps to a `dbAdmin
- `_
- role. Known values are: "readOnly", "readWrite", and "dbAdmin".
- },
- "opensearch_acl": [
- {
- "index": "str", # Optional. A regex
- for matching the indexes that this ACL should apply to.
- "permission": "str" # Optional.
- Permission set applied to the ACL. 'read' allows user to read
- from the index. 'write' allows for user to write to the
- index. 'readwrite' allows for both 'read' and 'write'
- permission. 'deny'(default) restricts user from performing
- any operation over an index. 'admin' allows for 'readwrite'
- as well as any operations to administer the index. Known
- values are: "deny", "admin", "read", "readwrite", and
- "write".
- }
- ],
- "pg_allow_replication": bool # Optional. For
- Postgres clusters, set to ``true`` for a user with replication
- rights. This option is not currently supported for other database
- engines.
- }
- }
- ],
- "version": "str", # Optional. A string representing the version of the
- database engine in use for the cluster.
- "version_end_of_availability": "str", # Optional. A timestamp referring to
- the date when the particular version will no longer be available for creating new
- clusters. If null, the version does not have an end of availability timeline.
- "version_end_of_life": "str" # Optional. A timestamp referring to the date
- when the particular version will no longer be supported. If null, the version
- does not have an end of life timeline.
+        "disable_ssl": bool, # Optional. When set to true, disables SSL encryption when
+          connecting to the source database.
+ "ignore_dbs": [
+ "str" # Optional. List of databases that should be ignored during
+ migration.
+ ]
}
- # response body for status code(s): 201
+ # response body for status code(s): 200
response == {
- "database": {
- "engine": "str", # A slug representing the database engine used for
- the cluster. The possible values are: "pg" for PostgreSQL, "mysql" for MySQL,
- "redis" for Caching, "mongodb" for MongoDB, "kafka" for Kafka, "opensearch"
- for OpenSearch, and "valkey" for Valkey. Required. Known values are: "pg",
- "mysql", "redis", "valkey", "mongodb", "kafka", and "opensearch".
- "name": "str", # A unique, human-readable name referring to a
- database cluster. Required.
- "num_nodes": 0, # The number of nodes in the database cluster.
- Required.
- "region": "str", # The slug identifier for the region where the
- database cluster is located. Required.
- "size": "str", # The slug identifier representing the size of the
- nodes in the database cluster. Required.
- "connection": {
- "database": "str", # Optional. The name of the default
- database.
- "host": "str", # Optional. The FQDN pointing to the database
- cluster's current primary node.
- "password": "str", # Optional. The randomly generated
- password for the default user.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- "port": 0, # Optional. The port on which the database
- cluster is listening.
- "ssl": bool, # Optional. A boolean value indicating if the
- connection should be made over SSL.
- "uri": "str", # Optional. A connection string in the format
- accepted by the ``psql`` command. This is provided as a convenience and
- should be able to be constructed by the other attributes.
- "user": "str" # Optional. The default user for the
- database.:code:`
`:code:`
`Requires ``database:view_credentials``
- scope.
- },
- "created_at": "2020-02-20 00:00:00", # Optional. A time value given
- in ISO8601 combined date and time format that represents when the database
- cluster was created.
- "db_names": [
- "str" # Optional. An array of strings containing the names
- of databases created in the database cluster.
- ],
- "do_settings": {
- "service_cnames": [
- "str" # Optional. An array of custom CNAMEs for the
- database cluster. Each CNAME must be a valid RFC 1123 hostname (e.g.,
- "db.example.com"). Maximum of 16 CNAMEs allowed, each up to 253
- characters.
- ]
- },
- "id": "str", # Optional. A unique ID that can be used to identify
- and reference a database cluster.
- "maintenance_window": {
- "day": "str", # The day of the week on which to apply
- maintenance updates. Required.
- "hour": "str", # The hour in UTC at which maintenance
- updates will be applied in 24 hour format. Required.
- "description": [
- "str" # Optional. A list of strings, each containing
- information about a pending maintenance update.
- ],
- "pending": bool # Optional. A boolean value indicating
- whether any maintenance is scheduled to be performed in the next window.
- },
- "metrics_endpoints": [
- {
- "host": "str", # Optional. A FQDN pointing to the
- database cluster's node(s).
- "port": 0 # Optional. The port on which a service is
- listening.
- }
- ],
- "private_connection": {
- "database": "str", # Optional. The name of the default
- database.
- "host": "str", # Optional. The FQDN pointing to the database
- cluster's current primary node.
- "password": "str", # Optional. The randomly generated
- password for the default user.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- "port": 0, # Optional. The port on which the database
- cluster is listening.
- "ssl": bool, # Optional. A boolean value indicating if the
- connection should be made over SSL.
- "uri": "str", # Optional. A connection string in the format
- accepted by the ``psql`` command. This is provided as a convenience and
- should be able to be constructed by the other attributes.
- "user": "str" # Optional. The default user for the
- database.:code:`
`:code:`
`Requires ``database:view_credentials``
- scope.
- },
- "private_network_uuid": "str", # Optional. A string specifying the
- UUID of the VPC to which the database cluster will be assigned. If excluded,
- the cluster when creating a new database cluster, it will be assigned to your
- account's default VPC for the region. :code:`
`:code:`
`Requires
- ``vpc:read`` scope.
- "project_id": "str", # Optional. The ID of the project that the
- database cluster is assigned to. If excluded when creating a new database
- cluster, it will be assigned to your default
- project.:code:`
`:code:`
`Requires ``project:read`` scope.
- "rules": [
- {
- "type": "str", # The type of resource that the
- firewall rule allows to access the database cluster. Required. Known
- values are: "droplet", "k8s", "ip_addr", "tag", and "app".
- "value": "str", # The ID of the specific resource,
- the name of a tag applied to a group of resources, or the IP address
- that the firewall rule allows to access the database cluster.
- Required.
- "cluster_uuid": "str", # Optional. A unique ID for
- the database cluster to which the rule is applied.
- "created_at": "2020-02-20 00:00:00", # Optional. A
- time value given in ISO8601 combined date and time format that
- represents when the firewall rule was created.
- "description": "str", # Optional. A human-readable
- description of the rule.
- "uuid": "str" # Optional. A unique ID for the
- firewall rule itself.
- }
- ],
- "schema_registry_connection": {
- "host": "str", # Optional. The FQDN pointing to the schema
- registry connection uri.
- "password": "str", # Optional. The randomly generated
- password for the schema registry.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- "port": 0, # Optional. The port on which the schema registry
- is listening.
- "ssl": bool, # Optional. A boolean value indicating if the
- connection should be made over SSL.
- "uri": "str", # Optional. This is provided as a convenience
- and should be able to be constructed by the other attributes.
- "user": "str" # Optional. The default user for the schema
- registry.:code:`
`:code:`
`Requires ``database:view_credentials``
- scope.
- },
- "semantic_version": "str", # Optional. A string representing the
- semantic version of the database engine in use for the cluster.
- "standby_connection": {
- "database": "str", # Optional. The name of the default
- database.
- "host": "str", # Optional. The FQDN pointing to the database
- cluster's current primary node.
- "password": "str", # Optional. The randomly generated
- password for the default user.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- "port": 0, # Optional. The port on which the database
- cluster is listening.
- "ssl": bool, # Optional. A boolean value indicating if the
- connection should be made over SSL.
- "uri": "str", # Optional. A connection string in the format
- accepted by the ``psql`` command. This is provided as a convenience and
- should be able to be constructed by the other attributes.
- "user": "str" # Optional. The default user for the
- database.:code:`
`:code:`
`Requires ``database:view_credentials``
- scope.
- },
- "standby_private_connection": {
- "database": "str", # Optional. The name of the default
- database.
- "host": "str", # Optional. The FQDN pointing to the database
- cluster's current primary node.
- "password": "str", # Optional. The randomly generated
- password for the default user.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- "port": 0, # Optional. The port on which the database
- cluster is listening.
- "ssl": bool, # Optional. A boolean value indicating if the
- connection should be made over SSL.
- "uri": "str", # Optional. A connection string in the format
- accepted by the ``psql`` command. This is provided as a convenience and
- should be able to be constructed by the other attributes.
- "user": "str" # Optional. The default user for the
- database.:code:`
`:code:`
`Requires ``database:view_credentials``
- scope.
- },
- "status": "str", # Optional. A string representing the current
- status of the database cluster. Known values are: "creating", "online",
- "resizing", "migrating", and "forking".
- "storage_size_mib": 0, # Optional. Additional storage added to the
- cluster, in MiB. If null, no additional storage is added to the cluster,
- beyond what is provided as a base amount from the 'size' and any previously
- added additional storage.
- "tags": [
- "str" # Optional. An array of tags that have been applied to
- the database cluster. :code:`
`:code:`
`Requires ``tag:read``
- scope.
- ],
- "ui_connection": {
- "host": "str", # Optional. The FQDN pointing to the
- opensearch cluster's current primary node.
- "password": "str", # Optional. The randomly generated
- password for the default user.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- "port": 0, # Optional. The port on which the opensearch
- dashboard is listening.
- "ssl": bool, # Optional. A boolean value indicating if the
- connection should be made over SSL.
- "uri": "str", # Optional. This is provided as a convenience
- and should be able to be constructed by the other attributes.
- "user": "str" # Optional. The default user for the
- opensearch dashboard.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- },
- "users": [
- {
- "name": "str", # The name of a database user.
- Required.
- "access_cert": "str", # Optional. Access certificate
- for TLS client authentication. (Kafka only).
- "access_key": "str", # Optional. Access key for TLS
- client authentication. (Kafka only).
- "mysql_settings": {
- "auth_plugin": "str" # A string specifying
- the authentication method to be used for connections to the MySQL
- user account. The valid values are ``mysql_native_password`` or
- ``caching_sha2_password``. If excluded when creating a new user,
- the default for the version of MySQL in use will be used. As of
- MySQL 8.0, the default is ``caching_sha2_password``. Required.
- Known values are: "mysql_native_password" and
- "caching_sha2_password".
- },
- "password": "str", # Optional. A randomly generated
- password for the database user.:code:`
`Requires
- ``database:view_credentials`` scope.
- "role": "str", # Optional. A string representing the
- database user's role. The value will be either "primary" or "normal".
- Known values are: "primary" and "normal".
- "settings": {
- "acl": [
- {
- "permission": "str", #
- Permission set applied to the ACL. 'consume' allows for
- messages to be consumed from the topic. 'produce' allows
- for messages to be published to the topic.
- 'produceconsume' allows for both 'consume' and 'produce'
- permission. 'admin' allows for 'produceconsume' as well
- as any operations to administer the topic (delete,
- update). Required. Known values are: "admin", "consume",
- "produce", and "produceconsume".
- "topic": "str", # A regex
- for matching the topic(s) that this ACL should apply to.
- Required.
- "id": "str" # Optional. An
- identifier for the ACL. Will be computed after the ACL is
- created/updated.
- }
- ],
- "mongo_user_settings": {
- "databases": [
- "str" # Optional. A list of
- databases to which the user should have access. When the
- database is set to ``admin``"" , the user will have
- access to all databases based on the user's role i.e. a
- user with the role ``readOnly`` assigned to the ``admin``
- database will have read access to all databases.
- ],
- "role": "str" # Optional. The role
- to assign to the user with each role mapping to a MongoDB
- built-in role. ``readOnly`` maps to a `read
- `_
- role. ``readWrite`` maps to a `readWrite
- `_
- role. ``dbAdmin`` maps to a `dbAdmin
- `_
- role. Known values are: "readOnly", "readWrite", and
- "dbAdmin".
- },
- "opensearch_acl": [
- {
- "index": "str", # Optional.
- A regex for matching the indexes that this ACL should
- apply to.
- "permission": "str" #
- Optional. Permission set applied to the ACL. 'read'
- allows user to read from the index. 'write' allows for
- user to write to the index. 'readwrite' allows for both
- 'read' and 'write' permission. 'deny'(default) restricts
- user from performing any operation over an index. 'admin'
- allows for 'readwrite' as well as any operations to
- administer the index. Known values are: "deny", "admin",
- "read", "readwrite", and "write".
- }
- ],
- "pg_allow_replication": bool # Optional. For
- Postgres clusters, set to ``true`` for a user with replication
- rights. This option is not currently supported for other database
- engines.
- }
- }
- ],
- "version": "str", # Optional. A string representing the version of
- the database engine in use for the cluster.
- "version_end_of_availability": "str", # Optional. A timestamp
- referring to the date when the particular version will no longer be available
- for creating new clusters. If null, the version does not have an end of
- availability timeline.
- "version_end_of_life": "str" # Optional. A timestamp referring to
- the date when the particular version will no longer be supported. If null,
- the version does not have an end of life timeline.
- }
+ "created_at": "str", # Optional. The time the migration was initiated, in
+ ISO 8601 format.
+ "id": "str", # Optional. The ID of the most recent migration.
+ "status": "str" # Optional. The current status of the migration. Known
+ values are: "running", "syncing", "canceled", "error", and "done".
+ }
+ # response body for status code(s): 404
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
+ """
+ error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ 401: cast(
+ Type[HttpResponseError],
+ lambda response: ClientAuthenticationError(response=response),
+ ),
+ 429: HttpResponseError,
+ 500: HttpResponseError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = kwargs.pop("params", {}) or {}
+
+ content_type: Optional[str] = kwargs.pop(
+ "content_type", _headers.pop("Content-Type", None)
+ )
+ cls: ClsType[JSON] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = body
+
+ _request = build_databases_update_online_migration_request(
+ database_cluster_uuid=database_cluster_uuid,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = (
+ await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 404]:
+ if _stream:
+ await response.read() # Load the body in memory and close the socket
+ map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
+ raise HttpResponseError(response=response)
+
+ response_headers = {}
+ if response.status_code == 200:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if response.status_code == 404:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if cls:
+ return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
+
+ return cast(JSON, deserialized) # type: ignore
+
+ @distributed_trace_async
+ async def delete_online_migration(
+ self, database_cluster_uuid: str, migration_id: str, **kwargs: Any
+ ) -> Optional[JSON]:
+ # pylint: disable=line-too-long
+ """Stop an Online Migration.
+
+ To stop an online migration, send a DELETE request to
+ ``/v2/databases/$DATABASE_ID/online-migration/$MIGRATION_ID``.
+
+ A status of 204 will be given. This indicates that the request was processed successfully, but
+ that no response body is needed.
+
+ :param database_cluster_uuid: A unique identifier for a database cluster. Required.
+ :type database_cluster_uuid: str
+ :param migration_id: A unique identifier assigned to the online migration. Required.
+ :type migration_id: str
+ :return: JSON object or None
+ :rtype: JSON or None
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # response body for status code(s): 404
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
+ """
+ error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ 401: cast(
+ Type[HttpResponseError],
+ lambda response: ClientAuthenticationError(response=response),
+ ),
+ 429: HttpResponseError,
+ 500: HttpResponseError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = kwargs.pop("params", {}) or {}
+
+ cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None)
+
+ _request = build_databases_delete_online_migration_request(
+ database_cluster_uuid=database_cluster_uuid,
+ migration_id=migration_id,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = (
+ await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [204, 404]:
+ if _stream:
+ await response.read() # Load the body in memory and close the socket
+ map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
+ raise HttpResponseError(response=response)
+
+ deserialized = None
+ response_headers = {}
+ if response.status_code == 204:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.status_code == 404:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @overload
+ async def update_region(
+ self,
+ database_cluster_uuid: str,
+ body: JSON,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> Optional[JSON]:
+ # pylint: disable=line-too-long
+ """Migrate a Database Cluster to a New Region.
+
+ To migrate a database cluster to a new region, send a ``PUT`` request to
+ ``/v2/databases/$DATABASE_ID/migrate``. The body of the request must specify a
+ ``region`` attribute.
+
+ A successful request will receive a 202 Accepted status code with no body in
+ response. Querying the database cluster will show that its ``status`` attribute
+ will now be set to ``migrating``. This will transition back to ``online`` when the
+ migration has completed.
+
+ :param database_cluster_uuid: A unique identifier for a database cluster. Required.
+ :type database_cluster_uuid: str
+ :param body: Required.
+ :type body: JSON
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: JSON object or None
+ :rtype: JSON or None
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # JSON input template you can fill out and use as your body input.
+ body = {
+ "region": "str" # A slug identifier for the region to which the database
+ cluster will be migrated. Required.
+ }
+
+ # response body for status code(s): 404
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
+ """
+
+ @overload
+ async def update_region(
+ self,
+ database_cluster_uuid: str,
+ body: IO[bytes],
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> Optional[JSON]:
+ # pylint: disable=line-too-long
+ """Migrate a Database Cluster to a New Region.
+
+ To migrate a database cluster to a new region, send a ``PUT`` request to
+ ``/v2/databases/$DATABASE_ID/migrate``. The body of the request must specify a
+ ``region`` attribute.
+
+ A successful request will receive a 202 Accepted status code with no body in
+ response. Querying the database cluster will show that its ``status`` attribute
+ will now be set to ``migrating``. This will transition back to ``online`` when the
+ migration has completed.
+
+ :param database_cluster_uuid: A unique identifier for a database cluster. Required.
+ :type database_cluster_uuid: str
+ :param body: Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: JSON object or None
+ :rtype: JSON or None
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # response body for status code(s): 404
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
+ """
+
+ @distributed_trace_async
+ async def update_region(
+ self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any
+ ) -> Optional[JSON]:
+ # pylint: disable=line-too-long
+ """Migrate a Database Cluster to a New Region.
+
+ To migrate a database cluster to a new region, send a ``PUT`` request to
+ ``/v2/databases/$DATABASE_ID/migrate``. The body of the request must specify a
+ ``region`` attribute.
+
+ A successful request will receive a 202 Accepted status code with no body in
+ response. Querying the database cluster will show that its ``status`` attribute
+ will now be set to ``migrating``. This will transition back to ``online`` when the
+ migration has completed.
+
+ :param database_cluster_uuid: A unique identifier for a database cluster. Required.
+ :type database_cluster_uuid: str
+ :param body: Is either a JSON type or a IO[bytes] type. Required.
+ :type body: JSON or IO[bytes]
+ :return: JSON object or None
+ :rtype: JSON or None
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # JSON input template you can fill out and use as your body input.
+ body = {
+ "region": "str" # A slug identifier for the region to which the database
+ cluster will be migrated. Required.
}
+
# response body for status code(s): 404
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
@@ -106974,7 +114049,7 @@ async def create_cluster(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> J
content_type: Optional[str] = kwargs.pop(
"content_type", _headers.pop("Content-Type", None)
)
- cls: ClsType[JSON] = kwargs.pop("cls", None)
+ cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -106984,7 +114059,8 @@ async def create_cluster(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> J
else:
_json = body
- _request = build_databases_create_cluster_request(
+ _request = build_databases_update_region_request(
+ database_cluster_uuid=database_cluster_uuid,
content_type=content_type,
json=_json,
content=_content,
@@ -107002,14 +114078,15 @@ async def create_cluster(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> J
response = pipeline_response.http_response
- if response.status_code not in [201, 404]:
+ if response.status_code not in [202, 404]:
if _stream:
await response.read() # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
raise HttpResponseError(response=response)
+ deserialized = None
response_headers = {}
- if response.status_code == 201:
+ if response.status_code == 202:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
)
@@ -107020,11 +114097,6 @@ async def create_cluster(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> J
"int", response.headers.get("ratelimit-reset")
)
- if response.content:
- deserialized = response.json()
- else:
- deserialized = None
-
if response.status_code == 404:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
@@ -107042,340 +114114,150 @@ async def create_cluster(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> J
deserialized = None
if cls:
- return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- return cast(JSON, deserialized) # type: ignore
+ return deserialized # type: ignore
- @distributed_trace_async
- async def get_cluster(self, database_cluster_uuid: str, **kwargs: Any) -> JSON:
+ @overload
+ async def update_cluster_size(
+ self,
+ database_cluster_uuid: str,
+ body: JSON,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> Optional[JSON]:
# pylint: disable=line-too-long
- """Retrieve an Existing Database Cluster.
+ """Resize a Database Cluster.
- To show information about an existing database cluster, send a GET request to
- ``/v2/databases/$DATABASE_ID``.
+ To resize a database cluster, send a PUT request to ``/v2/databases/$DATABASE_ID/resize``. The
+ body of the request must specify both the size and num_nodes attributes.
+ A successful request will receive a 202 Accepted status code with no body in response. Querying
+ the database cluster will show that its status attribute will now be set to resizing. This will
+ transition back to online when the resize operation has completed.
- The response will be a JSON object with a database key. This will be set to an object
- containing the standard database cluster attributes.
+ :param database_cluster_uuid: A unique identifier for a database cluster. Required.
+ :type database_cluster_uuid: str
+ :param body: Required.
+ :type body: JSON
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: JSON object or None
+ :rtype: JSON or None
+ :raises ~azure.core.exceptions.HttpResponseError:
- The embedded ``connection`` and ``private_connection`` objects will contain the information
- needed to access the database cluster. For multi-node clusters, the ``standby_connection`` and
- ``standby_private_connection`` objects contain the information needed to connect to the
- cluster's standby node(s).
+ Example:
+ .. code-block:: python
- The embedded maintenance_window object will contain information about any scheduled maintenance
- for the database cluster.
+ # JSON input template you can fill out and use as your body input.
+ body = {
+ "num_nodes": 0, # The number of nodes in the database cluster. Valid values
+ are are 1-3. In addition to the primary node, up to two standby nodes may be
+ added for highly available configurations. Required.
+ "size": "str", # A slug identifier representing desired the size of the
+ nodes in the database cluster. Required.
+ "storage_size_mib": 0 # Optional. Additional storage added to the cluster,
+ in MiB. If null, no additional storage is added to the cluster, beyond what is
+ provided as a base amount from the 'size' and any previously added additional
+ storage.
+ }
+
+ # response body for status code(s): 404
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
+ """
+
+ @overload
+ async def update_cluster_size(
+ self,
+ database_cluster_uuid: str,
+ body: IO[bytes],
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> Optional[JSON]:
+ # pylint: disable=line-too-long
+ """Resize a Database Cluster.
+
+ To resize a database cluster, send a PUT request to ``/v2/databases/$DATABASE_ID/resize``. The
+ body of the request must specify both the size and num_nodes attributes.
+ A successful request will receive a 202 Accepted status code with no body in response. Querying
+ the database cluster will show that its status attribute will now be set to resizing. This will
+ transition back to online when the resize operation has completed.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
- :return: JSON object
- :rtype: JSON
+ :param body: Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: JSON object or None
+ :rtype: JSON or None
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
- # response body for status code(s): 200
+ # response body for status code(s): 404
response == {
- "database": {
- "engine": "str", # A slug representing the database engine used for
- the cluster. The possible values are: "pg" for PostgreSQL, "mysql" for MySQL,
- "redis" for Caching, "mongodb" for MongoDB, "kafka" for Kafka, "opensearch"
- for OpenSearch, and "valkey" for Valkey. Required. Known values are: "pg",
- "mysql", "redis", "valkey", "mongodb", "kafka", and "opensearch".
- "name": "str", # A unique, human-readable name referring to a
- database cluster. Required.
- "num_nodes": 0, # The number of nodes in the database cluster.
- Required.
- "region": "str", # The slug identifier for the region where the
- database cluster is located. Required.
- "size": "str", # The slug identifier representing the size of the
- nodes in the database cluster. Required.
- "connection": {
- "database": "str", # Optional. The name of the default
- database.
- "host": "str", # Optional. The FQDN pointing to the database
- cluster's current primary node.
- "password": "str", # Optional. The randomly generated
- password for the default user.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- "port": 0, # Optional. The port on which the database
- cluster is listening.
- "ssl": bool, # Optional. A boolean value indicating if the
- connection should be made over SSL.
- "uri": "str", # Optional. A connection string in the format
- accepted by the ``psql`` command. This is provided as a convenience and
- should be able to be constructed by the other attributes.
- "user": "str" # Optional. The default user for the
- database.:code:`
`:code:`
`Requires ``database:view_credentials``
- scope.
- },
- "created_at": "2020-02-20 00:00:00", # Optional. A time value given
- in ISO8601 combined date and time format that represents when the database
- cluster was created.
- "db_names": [
- "str" # Optional. An array of strings containing the names
- of databases created in the database cluster.
- ],
- "do_settings": {
- "service_cnames": [
- "str" # Optional. An array of custom CNAMEs for the
- database cluster. Each CNAME must be a valid RFC 1123 hostname (e.g.,
- "db.example.com"). Maximum of 16 CNAMEs allowed, each up to 253
- characters.
- ]
- },
- "id": "str", # Optional. A unique ID that can be used to identify
- and reference a database cluster.
- "maintenance_window": {
- "day": "str", # The day of the week on which to apply
- maintenance updates. Required.
- "hour": "str", # The hour in UTC at which maintenance
- updates will be applied in 24 hour format. Required.
- "description": [
- "str" # Optional. A list of strings, each containing
- information about a pending maintenance update.
- ],
- "pending": bool # Optional. A boolean value indicating
- whether any maintenance is scheduled to be performed in the next window.
- },
- "metrics_endpoints": [
- {
- "host": "str", # Optional. A FQDN pointing to the
- database cluster's node(s).
- "port": 0 # Optional. The port on which a service is
- listening.
- }
- ],
- "private_connection": {
- "database": "str", # Optional. The name of the default
- database.
- "host": "str", # Optional. The FQDN pointing to the database
- cluster's current primary node.
- "password": "str", # Optional. The randomly generated
- password for the default user.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- "port": 0, # Optional. The port on which the database
- cluster is listening.
- "ssl": bool, # Optional. A boolean value indicating if the
- connection should be made over SSL.
- "uri": "str", # Optional. A connection string in the format
- accepted by the ``psql`` command. This is provided as a convenience and
- should be able to be constructed by the other attributes.
- "user": "str" # Optional. The default user for the
- database.:code:`
`:code:`
`Requires ``database:view_credentials``
- scope.
- },
- "private_network_uuid": "str", # Optional. A string specifying the
- UUID of the VPC to which the database cluster will be assigned. If excluded,
- the cluster when creating a new database cluster, it will be assigned to your
- account's default VPC for the region. :code:`
`:code:`
`Requires
- ``vpc:read`` scope.
- "project_id": "str", # Optional. The ID of the project that the
- database cluster is assigned to. If excluded when creating a new database
- cluster, it will be assigned to your default
- project.:code:`
`:code:`
`Requires ``project:read`` scope.
- "rules": [
- {
- "type": "str", # The type of resource that the
- firewall rule allows to access the database cluster. Required. Known
- values are: "droplet", "k8s", "ip_addr", "tag", and "app".
- "value": "str", # The ID of the specific resource,
- the name of a tag applied to a group of resources, or the IP address
- that the firewall rule allows to access the database cluster.
- Required.
- "cluster_uuid": "str", # Optional. A unique ID for
- the database cluster to which the rule is applied.
- "created_at": "2020-02-20 00:00:00", # Optional. A
- time value given in ISO8601 combined date and time format that
- represents when the firewall rule was created.
- "description": "str", # Optional. A human-readable
- description of the rule.
- "uuid": "str" # Optional. A unique ID for the
- firewall rule itself.
- }
- ],
- "schema_registry_connection": {
- "host": "str", # Optional. The FQDN pointing to the schema
- registry connection uri.
- "password": "str", # Optional. The randomly generated
- password for the schema registry.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- "port": 0, # Optional. The port on which the schema registry
- is listening.
- "ssl": bool, # Optional. A boolean value indicating if the
- connection should be made over SSL.
- "uri": "str", # Optional. This is provided as a convenience
- and should be able to be constructed by the other attributes.
- "user": "str" # Optional. The default user for the schema
- registry.:code:`
`:code:`
`Requires ``database:view_credentials``
- scope.
- },
- "semantic_version": "str", # Optional. A string representing the
- semantic version of the database engine in use for the cluster.
- "standby_connection": {
- "database": "str", # Optional. The name of the default
- database.
- "host": "str", # Optional. The FQDN pointing to the database
- cluster's current primary node.
- "password": "str", # Optional. The randomly generated
- password for the default user.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- "port": 0, # Optional. The port on which the database
- cluster is listening.
- "ssl": bool, # Optional. A boolean value indicating if the
- connection should be made over SSL.
- "uri": "str", # Optional. A connection string in the format
- accepted by the ``psql`` command. This is provided as a convenience and
- should be able to be constructed by the other attributes.
- "user": "str" # Optional. The default user for the
- database.:code:`
`:code:`
`Requires ``database:view_credentials``
- scope.
- },
- "standby_private_connection": {
- "database": "str", # Optional. The name of the default
- database.
- "host": "str", # Optional. The FQDN pointing to the database
- cluster's current primary node.
- "password": "str", # Optional. The randomly generated
- password for the default user.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- "port": 0, # Optional. The port on which the database
- cluster is listening.
- "ssl": bool, # Optional. A boolean value indicating if the
- connection should be made over SSL.
- "uri": "str", # Optional. A connection string in the format
- accepted by the ``psql`` command. This is provided as a convenience and
- should be able to be constructed by the other attributes.
- "user": "str" # Optional. The default user for the
- database.:code:`
`:code:`
`Requires ``database:view_credentials``
- scope.
- },
- "status": "str", # Optional. A string representing the current
- status of the database cluster. Known values are: "creating", "online",
- "resizing", "migrating", and "forking".
- "storage_size_mib": 0, # Optional. Additional storage added to the
- cluster, in MiB. If null, no additional storage is added to the cluster,
- beyond what is provided as a base amount from the 'size' and any previously
- added additional storage.
- "tags": [
- "str" # Optional. An array of tags that have been applied to
- the database cluster. :code:`
`:code:`
`Requires ``tag:read``
- scope.
- ],
- "ui_connection": {
- "host": "str", # Optional. The FQDN pointing to the
- opensearch cluster's current primary node.
- "password": "str", # Optional. The randomly generated
- password for the default user.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- "port": 0, # Optional. The port on which the opensearch
- dashboard is listening.
- "ssl": bool, # Optional. A boolean value indicating if the
- connection should be made over SSL.
- "uri": "str", # Optional. This is provided as a convenience
- and should be able to be constructed by the other attributes.
- "user": "str" # Optional. The default user for the
- opensearch dashboard.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- },
- "users": [
- {
- "name": "str", # The name of a database user.
- Required.
- "access_cert": "str", # Optional. Access certificate
- for TLS client authentication. (Kafka only).
- "access_key": "str", # Optional. Access key for TLS
- client authentication. (Kafka only).
- "mysql_settings": {
- "auth_plugin": "str" # A string specifying
- the authentication method to be used for connections to the MySQL
- user account. The valid values are ``mysql_native_password`` or
- ``caching_sha2_password``. If excluded when creating a new user,
- the default for the version of MySQL in use will be used. As of
- MySQL 8.0, the default is ``caching_sha2_password``. Required.
- Known values are: "mysql_native_password" and
- "caching_sha2_password".
- },
- "password": "str", # Optional. A randomly generated
- password for the database user.:code:`
`Requires
- ``database:view_credentials`` scope.
- "role": "str", # Optional. A string representing the
- database user's role. The value will be either "primary" or "normal".
- Known values are: "primary" and "normal".
- "settings": {
- "acl": [
- {
- "permission": "str", #
- Permission set applied to the ACL. 'consume' allows for
- messages to be consumed from the topic. 'produce' allows
- for messages to be published to the topic.
- 'produceconsume' allows for both 'consume' and 'produce'
- permission. 'admin' allows for 'produceconsume' as well
- as any operations to administer the topic (delete,
- update). Required. Known values are: "admin", "consume",
- "produce", and "produceconsume".
- "topic": "str", # A regex
- for matching the topic(s) that this ACL should apply to.
- Required.
- "id": "str" # Optional. An
- identifier for the ACL. Will be computed after the ACL is
- created/updated.
- }
- ],
- "mongo_user_settings": {
- "databases": [
- "str" # Optional. A list of
- databases to which the user should have access. When the
- database is set to ``admin``"" , the user will have
- access to all databases based on the user's role i.e. a
- user with the role ``readOnly`` assigned to the ``admin``
- database will have read access to all databases.
- ],
- "role": "str" # Optional. The role
- to assign to the user with each role mapping to a MongoDB
- built-in role. ``readOnly`` maps to a `read
- `_
- role. ``readWrite`` maps to a `readWrite
- `_
- role. ``dbAdmin`` maps to a `dbAdmin
- `_
- role. Known values are: "readOnly", "readWrite", and
- "dbAdmin".
- },
- "opensearch_acl": [
- {
- "index": "str", # Optional.
- A regex for matching the indexes that this ACL should
- apply to.
- "permission": "str" #
- Optional. Permission set applied to the ACL. 'read'
- allows user to read from the index. 'write' allows for
- user to write to the index. 'readwrite' allows for both
- 'read' and 'write' permission. 'deny'(default) restricts
- user from performing any operation over an index. 'admin'
- allows for 'readwrite' as well as any operations to
- administer the index. Known values are: "deny", "admin",
- "read", "readwrite", and "write".
- }
- ],
- "pg_allow_replication": bool # Optional. For
- Postgres clusters, set to ``true`` for a user with replication
- rights. This option is not currently supported for other database
- engines.
- }
- }
- ],
- "version": "str", # Optional. A string representing the version of
- the database engine in use for the cluster.
- "version_end_of_availability": "str", # Optional. A timestamp
- referring to the date when the particular version will no longer be available
- for creating new clusters. If null, the version does not have an end of
- availability timeline.
- "version_end_of_life": "str" # Optional. A timestamp referring to
- the date when the particular version will no longer be supported. If null,
- the version does not have an end of life timeline.
- }
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
+ """
+
+ @distributed_trace_async
+ async def update_cluster_size(
+ self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any
+ ) -> Optional[JSON]:
+ # pylint: disable=line-too-long
+ """Resize a Database Cluster.
+
+ To resize a database cluster, send a PUT request to ``/v2/databases/$DATABASE_ID/resize``. The
+ body of the request must specify both the size and num_nodes attributes.
+ A successful request will receive a 202 Accepted status code with no body in response. Querying
+ the database cluster will show that its status attribute will now be set to resizing. This will
+ transition back to online when the resize operation has completed.
+
+ :param database_cluster_uuid: A unique identifier for a database cluster. Required.
+ :type database_cluster_uuid: str
+ :param body: Is either a JSON type or a IO[bytes] type. Required.
+ :type body: JSON or IO[bytes]
+ :return: JSON object or None
+ :rtype: JSON or None
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # JSON input template you can fill out and use as your body input.
+ body = {
+ "num_nodes": 0, # The number of nodes in the database cluster. Valid values
+ are are 1-3. In addition to the primary node, up to two standby nodes may be
+ added for highly available configurations. Required.
+ "size": "str", # A slug identifier representing desired the size of the
+ nodes in the database cluster. Required.
+ "storage_size_mib": 0 # Optional. Additional storage added to the cluster,
+ in MiB. If null, no additional storage is added to the cluster, beyond what is
+ provided as a base amount from the 'size' and any previously added additional
+ storage.
}
+
# response body for status code(s): 404
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
@@ -107401,13 +114283,27 @@ async def get_cluster(self, database_cluster_uuid: str, **kwargs: Any) -> JSON:
}
error_map.update(kwargs.pop("error_map", {}) or {})
- _headers = kwargs.pop("headers", {}) or {}
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = kwargs.pop("params", {}) or {}
- cls: ClsType[JSON] = kwargs.pop("cls", None)
+ content_type: Optional[str] = kwargs.pop(
+ "content_type", _headers.pop("Content-Type", None)
+ )
+ cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None)
- _request = build_databases_get_cluster_request(
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = body
+
+ _request = build_databases_update_cluster_size_request(
database_cluster_uuid=database_cluster_uuid,
+ content_type=content_type,
+ json=_json,
+ content=_content,
headers=_headers,
params=_params,
)
@@ -107422,14 +114318,15 @@ async def get_cluster(self, database_cluster_uuid: str, **kwargs: Any) -> JSON:
response = pipeline_response.http_response
- if response.status_code not in [200, 404]:
+ if response.status_code not in [202, 404]:
if _stream:
await response.read() # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
raise HttpResponseError(response=response)
+ deserialized = None
response_headers = {}
- if response.status_code == 200:
+ if response.status_code == 202:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
)
@@ -107440,11 +114337,6 @@ async def get_cluster(self, database_cluster_uuid: str, **kwargs: Any) -> JSON:
"int", response.headers.get("ratelimit-reset")
)
- if response.content:
- deserialized = response.json()
- else:
- deserialized = None
-
if response.status_code == 404:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
@@ -107462,30 +114354,52 @@ async def get_cluster(self, database_cluster_uuid: str, **kwargs: Any) -> JSON:
deserialized = None
if cls:
- return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- return cast(JSON, deserialized) # type: ignore
+ return deserialized # type: ignore
@distributed_trace_async
- async def destroy_cluster(
+ async def list_firewall_rules(
self, database_cluster_uuid: str, **kwargs: Any
- ) -> Optional[JSON]:
+ ) -> JSON:
# pylint: disable=line-too-long
- """Destroy a Database Cluster.
+ """List Firewall Rules (Trusted Sources) for a Database Cluster.
- To destroy a specific database, send a DELETE request to ``/v2/databases/$DATABASE_ID``.
- A status of 204 will be given. This indicates that the request was processed successfully, but
- that no response body is needed.
+ To list all of a database cluster's firewall rules (known as "trusted sources" in the control
+ panel), send a GET request to ``/v2/databases/$DATABASE_ID/firewall``.
+ The result will be a JSON object with a ``rules`` key.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
- :return: JSON object or None
- :rtype: JSON or None
+ :return: JSON object
+ :rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
+ # response body for status code(s): 200
+ response == {
+ "rules": [
+ {
+ "type": "str", # The type of resource that the firewall rule
+ allows to access the database cluster. Required. Known values are:
+ "droplet", "k8s", "ip_addr", "tag", and "app".
+ "value": "str", # The ID of the specific resource, the name
+ of a tag applied to a group of resources, or the IP address that the
+ firewall rule allows to access the database cluster. Required.
+ "cluster_uuid": "str", # Optional. A unique ID for the
+ database cluster to which the rule is applied.
+ "created_at": "2020-02-20 00:00:00", # Optional. A time
+ value given in ISO8601 combined date and time format that represents when
+ the firewall rule was created.
+ "description": "str", # Optional. A human-readable
+ description of the rule.
+ "uuid": "str" # Optional. A unique ID for the firewall rule
+ itself.
+ }
+ ]
+ }
# response body for status code(s): 404
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
@@ -107514,9 +114428,9 @@ async def destroy_cluster(
_headers = kwargs.pop("headers", {}) or {}
_params = kwargs.pop("params", {}) or {}
- cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None)
+ cls: ClsType[JSON] = kwargs.pop("cls", None)
- _request = build_databases_destroy_cluster_request(
+ _request = build_databases_list_firewall_rules_request(
database_cluster_uuid=database_cluster_uuid,
headers=_headers,
params=_params,
@@ -107532,15 +114446,14 @@ async def destroy_cluster(
response = pipeline_response.http_response
- if response.status_code not in [204, 404]:
+ if response.status_code not in [200, 404]:
if _stream:
await response.read() # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
raise HttpResponseError(response=response)
- deserialized = None
response_headers = {}
- if response.status_code == 204:
+ if response.status_code == 200:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
)
@@ -107551,6 +114464,11 @@ async def destroy_cluster(
"int", response.headers.get("ratelimit-reset")
)
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
if response.status_code == 404:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
@@ -107568,33 +114486,182 @@ async def destroy_cluster(
deserialized = None
if cls:
- return cls(pipeline_response, deserialized, response_headers) # type: ignore
+ return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
- return deserialized # type: ignore
+ return cast(JSON, deserialized) # type: ignore
- @distributed_trace_async
- async def get_config(self, database_cluster_uuid: str, **kwargs: Any) -> JSON:
+ @overload
+ async def update_firewall_rules(
+ self,
+ database_cluster_uuid: str,
+ body: JSON,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> Optional[JSON]:
# pylint: disable=line-too-long
- """Retrieve an Existing Database Cluster Configuration.
+ """Update Firewall Rules (Trusted Sources) for a Database.
- Shows configuration parameters for an existing database cluster by sending a GET request to
- ``/v2/databases/$DATABASE_ID/config``.
- The response is a JSON object with a ``config`` key, which is set to an object
- containing any database configuration parameters.
+ To update a database cluster's firewall rules (known as "trusted sources" in the control
+ panel), send a PUT request to ``/v2/databases/$DATABASE_ID/firewall`` specifying which
+ resources should be able to open connections to the database. You may limit connections to
+ specific Droplets, Kubernetes clusters, or IP addresses. When a tag is provided, any Droplet or
+ Kubernetes node with that tag applied to it will have access. The firewall is limited to 100
+ rules (or trusted sources). When possible, we recommend `placing your databases into a VPC
+            network <https://docs.digitalocean.com/products/networking/vpc/>`_ to limit access to them
+ instead of using a firewall.
+            A successful request will receive a 204 No Content status code with no body in response.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
- :return: JSON object
- :rtype: JSON
+ :param body: Required.
+ :type body: JSON
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: JSON object or None
+ :rtype: JSON or None
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
- # response body for status code(s): 200
+ # JSON input template you can fill out and use as your body input.
+ body = {
+ "rules": [
+ {
+ "type": "str", # The type of resource that the firewall rule
+ allows to access the database cluster. Required. Known values are:
+ "droplet", "k8s", "ip_addr", "tag", and "app".
+ "value": "str", # The ID of the specific resource, the name
+ of a tag applied to a group of resources, or the IP address that the
+ firewall rule allows to access the database cluster. Required.
+ "cluster_uuid": "str", # Optional. A unique ID for the
+ database cluster to which the rule is applied.
+ "created_at": "2020-02-20 00:00:00", # Optional. A time
+ value given in ISO8601 combined date and time format that represents when
+ the firewall rule was created.
+ "description": "str", # Optional. A human-readable
+ description of the rule.
+ "uuid": "str" # Optional. A unique ID for the firewall rule
+ itself.
+ }
+ ]
+ }
+
+ # response body for status code(s): 404
response == {
- "config": {}
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
+ """
+
+ @overload
+ async def update_firewall_rules(
+ self,
+ database_cluster_uuid: str,
+ body: IO[bytes],
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> Optional[JSON]:
+ # pylint: disable=line-too-long
+ """Update Firewall Rules (Trusted Sources) for a Database.
+
+ To update a database cluster's firewall rules (known as "trusted sources" in the control
+ panel), send a PUT request to ``/v2/databases/$DATABASE_ID/firewall`` specifying which
+ resources should be able to open connections to the database. You may limit connections to
+ specific Droplets, Kubernetes clusters, or IP addresses. When a tag is provided, any Droplet or
+ Kubernetes node with that tag applied to it will have access. The firewall is limited to 100
+ rules (or trusted sources). When possible, we recommend `placing your databases into a VPC
+            network <https://docs.digitalocean.com/products/networking/vpc/>`_ to limit access to them
+ instead of using a firewall.
+            A successful request will receive a 204 No Content status code with no body in response.
+
+ :param database_cluster_uuid: A unique identifier for a database cluster. Required.
+ :type database_cluster_uuid: str
+ :param body: Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: JSON object or None
+ :rtype: JSON or None
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # response body for status code(s): 404
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
+ """
+
+ @distributed_trace_async
+ async def update_firewall_rules(
+ self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any
+ ) -> Optional[JSON]:
+ # pylint: disable=line-too-long
+ """Update Firewall Rules (Trusted Sources) for a Database.
+
+ To update a database cluster's firewall rules (known as "trusted sources" in the control
+ panel), send a PUT request to ``/v2/databases/$DATABASE_ID/firewall`` specifying which
+ resources should be able to open connections to the database. You may limit connections to
+ specific Droplets, Kubernetes clusters, or IP addresses. When a tag is provided, any Droplet or
+ Kubernetes node with that tag applied to it will have access. The firewall is limited to 100
+ rules (or trusted sources). When possible, we recommend `placing your databases into a VPC
+            network <https://docs.digitalocean.com/products/networking/vpc/>`_ to limit access to them
+ instead of using a firewall.
+            A successful request will receive a 204 No Content status code with no body in response.
+
+ :param database_cluster_uuid: A unique identifier for a database cluster. Required.
+ :type database_cluster_uuid: str
+ :param body: Is either a JSON type or a IO[bytes] type. Required.
+ :type body: JSON or IO[bytes]
+ :return: JSON object or None
+ :rtype: JSON or None
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # JSON input template you can fill out and use as your body input.
+ body = {
+ "rules": [
+ {
+ "type": "str", # The type of resource that the firewall rule
+ allows to access the database cluster. Required. Known values are:
+ "droplet", "k8s", "ip_addr", "tag", and "app".
+ "value": "str", # The ID of the specific resource, the name
+ of a tag applied to a group of resources, or the IP address that the
+ firewall rule allows to access the database cluster. Required.
+ "cluster_uuid": "str", # Optional. A unique ID for the
+ database cluster to which the rule is applied.
+ "created_at": "2020-02-20 00:00:00", # Optional. A time
+ value given in ISO8601 combined date and time format that represents when
+ the firewall rule was created.
+ "description": "str", # Optional. A human-readable
+ description of the rule.
+ "uuid": "str" # Optional. A unique ID for the firewall rule
+ itself.
+ }
+ ]
}
+
# response body for status code(s): 404
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
@@ -107620,13 +114687,27 @@ async def get_config(self, database_cluster_uuid: str, **kwargs: Any) -> JSON:
}
error_map.update(kwargs.pop("error_map", {}) or {})
- _headers = kwargs.pop("headers", {}) or {}
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = kwargs.pop("params", {}) or {}
- cls: ClsType[JSON] = kwargs.pop("cls", None)
+ content_type: Optional[str] = kwargs.pop(
+ "content_type", _headers.pop("Content-Type", None)
+ )
+ cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None)
- _request = build_databases_get_config_request(
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = body
+
+ _request = build_databases_update_firewall_rules_request(
database_cluster_uuid=database_cluster_uuid,
+ content_type=content_type,
+ json=_json,
+ content=_content,
headers=_headers,
params=_params,
)
@@ -107641,14 +114722,15 @@ async def get_config(self, database_cluster_uuid: str, **kwargs: Any) -> JSON:
response = pipeline_response.http_response
- if response.status_code not in [200, 404]:
+ if response.status_code not in [204, 404]:
if _stream:
await response.read() # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
raise HttpResponseError(response=response)
+ deserialized = None
response_headers = {}
- if response.status_code == 200:
+ if response.status_code == 204:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
)
@@ -107659,11 +114741,6 @@ async def get_config(self, database_cluster_uuid: str, **kwargs: Any) -> JSON:
"int", response.headers.get("ratelimit-reset")
)
- if response.content:
- deserialized = response.json()
- else:
- deserialized = None
-
if response.status_code == 404:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
@@ -107681,12 +114758,12 @@ async def get_config(self, database_cluster_uuid: str, **kwargs: Any) -> JSON:
deserialized = None
if cls:
- return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- return cast(JSON, deserialized) # type: ignore
+ return deserialized # type: ignore
@overload
- async def patch_config(
+ async def update_maintenance_window(
self,
database_cluster_uuid: str,
body: JSON,
@@ -107695,10 +114772,11 @@ async def patch_config(
**kwargs: Any
) -> Optional[JSON]:
# pylint: disable=line-too-long
- """Update the Database Configuration for an Existing Database.
+ """Configure a Database Cluster's Maintenance Window.
- To update the configuration for an existing database cluster, send a PATCH request to
- ``/v2/databases/$DATABASE_ID/config``.
+ To configure the window when automatic maintenance should be performed for a database cluster,
+ send a PUT request to ``/v2/databases/$DATABASE_ID/maintenance``.
+ A successful request will receive a 204 No Content status code with no body in response.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
@@ -107716,7 +114794,16 @@ async def patch_config(
# JSON input template you can fill out and use as your body input.
body = {
- "config": {}
+ "day": "str", # The day of the week on which to apply maintenance updates.
+ Required.
+ "hour": "str", # The hour in UTC at which maintenance updates will be
+ applied in 24 hour format. Required.
+ "description": [
+ "str" # Optional. A list of strings, each containing information
+ about a pending maintenance update.
+ ],
+ "pending": bool # Optional. A boolean value indicating whether any
+ maintenance is scheduled to be performed in the next window.
}
# response body for status code(s): 404
@@ -107733,7 +114820,7 @@ async def patch_config(
"""
@overload
- async def patch_config(
+ async def update_maintenance_window(
self,
database_cluster_uuid: str,
body: IO[bytes],
@@ -107742,10 +114829,11 @@ async def patch_config(
**kwargs: Any
) -> Optional[JSON]:
# pylint: disable=line-too-long
- """Update the Database Configuration for an Existing Database.
+ """Configure a Database Cluster's Maintenance Window.
- To update the configuration for an existing database cluster, send a PATCH request to
- ``/v2/databases/$DATABASE_ID/config``.
+ To configure the window when automatic maintenance should be performed for a database cluster,
+ send a PUT request to ``/v2/databases/$DATABASE_ID/maintenance``.
+ A successful request will receive a 204 No Content status code with no body in response.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
@@ -107758,36 +114846,171 @@ async def patch_config(
:rtype: JSON or None
:raises ~azure.core.exceptions.HttpResponseError:
- Example:
- .. code-block:: python
+ Example:
+ .. code-block:: python
+
+ # response body for status code(s): 404
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
+ """
+
+ @distributed_trace_async
+ async def update_maintenance_window(
+ self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any
+ ) -> Optional[JSON]:
+ # pylint: disable=line-too-long
+ """Configure a Database Cluster's Maintenance Window.
+
+ To configure the window when automatic maintenance should be performed for a database cluster,
+ send a PUT request to ``/v2/databases/$DATABASE_ID/maintenance``.
+ A successful request will receive a 204 No Content status code with no body in response.
+
+ :param database_cluster_uuid: A unique identifier for a database cluster. Required.
+ :type database_cluster_uuid: str
+ :param body: Is either a JSON type or a IO[bytes] type. Required.
+ :type body: JSON or IO[bytes]
+ :return: JSON object or None
+ :rtype: JSON or None
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # JSON input template you can fill out and use as your body input.
+ body = {
+ "day": "str", # The day of the week on which to apply maintenance updates.
+ Required.
+ "hour": "str", # The hour in UTC at which maintenance updates will be
+ applied in 24 hour format. Required.
+ "description": [
+ "str" # Optional. A list of strings, each containing information
+ about a pending maintenance update.
+ ],
+ "pending": bool # Optional. A boolean value indicating whether any
+ maintenance is scheduled to be performed in the next window.
+ }
+
+ # response body for status code(s): 404
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
+ """
+ error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ 401: cast(
+ Type[HttpResponseError],
+ lambda response: ClientAuthenticationError(response=response),
+ ),
+ 429: HttpResponseError,
+ 500: HttpResponseError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = kwargs.pop("params", {}) or {}
+
+ content_type: Optional[str] = kwargs.pop(
+ "content_type", _headers.pop("Content-Type", None)
+ )
+ cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = body
+
+ _request = build_databases_update_maintenance_window_request(
+ database_cluster_uuid=database_cluster_uuid,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = (
+ await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [204, 404]:
+ if _stream:
+ await response.read() # Load the body in memory and close the socket
+ map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
+ raise HttpResponseError(response=response)
+
+ deserialized = None
+ response_headers = {}
+ if response.status_code == 204:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.status_code == 404:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- # response body for status code(s): 404
- response == {
- "id": "str", # A short identifier corresponding to the HTTP status code
- returned. For example, the ID for a response returning a 404 status code would
- be "not_found.". Required.
- "message": "str", # A message providing additional information about the
- error, including details to help resolve it when possible. Required.
- "request_id": "str" # Optional. Optionally, some endpoints may include a
- request ID that should be provided when reporting bugs or opening support
- tickets to help identify the issue.
- }
- """
+ return deserialized # type: ignore
@distributed_trace_async
- async def patch_config(
- self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any
+ async def install_update(
+ self, database_cluster_uuid: str, **kwargs: Any
) -> Optional[JSON]:
# pylint: disable=line-too-long
- """Update the Database Configuration for an Existing Database.
+ """Start Database Maintenance.
- To update the configuration for an existing database cluster, send a PATCH request to
- ``/v2/databases/$DATABASE_ID/config``.
+ To start the installation of updates for a database cluster, send a PUT request to
+ ``/v2/databases/$DATABASE_ID/install_update``.
+ A successful request will receive a 204 No Content status code with no body in response.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
- :param body: Is either a JSON type or a IO[bytes] type. Required.
- :type body: JSON or IO[bytes]
:return: JSON object or None
:rtype: JSON or None
:raises ~azure.core.exceptions.HttpResponseError:
@@ -107795,11 +115018,6 @@ async def patch_config(
Example:
.. code-block:: python
- # JSON input template you can fill out and use as your body input.
- body = {
- "config": {}
- }
-
# response body for status code(s): 404
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
@@ -107825,27 +115043,13 @@ async def patch_config(
}
error_map.update(kwargs.pop("error_map", {}) or {})
- _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _headers = kwargs.pop("headers", {}) or {}
_params = kwargs.pop("params", {}) or {}
- content_type: Optional[str] = kwargs.pop(
- "content_type", _headers.pop("Content-Type", None)
- )
cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None)
- content_type = content_type or "application/json"
- _json = None
- _content = None
- if isinstance(body, (IOBase, bytes)):
- _content = body
- else:
- _json = body
-
- _request = build_databases_patch_config_request(
+ _request = build_databases_install_update_request(
database_cluster_uuid=database_cluster_uuid,
- content_type=content_type,
- json=_json,
- content=_content,
headers=_headers,
params=_params,
)
@@ -107860,7 +115064,7 @@ async def patch_config(
response = pipeline_response.http_response
- if response.status_code not in [200, 404]:
+ if response.status_code not in [204, 404]:
if _stream:
await response.read() # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
@@ -107868,7 +115072,7 @@ async def patch_config(
deserialized = None
response_headers = {}
- if response.status_code == 200:
+ if response.status_code == 204:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
)
@@ -107901,16 +115105,16 @@ async def patch_config(
return deserialized # type: ignore
@distributed_trace_async
- async def get_ca(self, database_cluster_uuid: str, **kwargs: Any) -> JSON:
+ async def list_backups(self, database_cluster_uuid: str, **kwargs: Any) -> JSON:
# pylint: disable=line-too-long
- """Retrieve the Public Certificate.
-
- To retrieve the public certificate used to secure the connection to the database cluster send a
- GET request to
- ``/v2/databases/$DATABASE_ID/ca``.
+ """List Backups for a Database Cluster.
- The response will be a JSON object with a ``ca`` key. This will be set to an object
- containing the base64 encoding of the public key certificate.
+ To list all of the available backups of a PostgreSQL or MySQL database cluster, send a GET
+ request to ``/v2/databases/$DATABASE_ID/backups``.
+ **Note**\\ : Backups are not supported for Caching or Valkey clusters.
+ The result will be a JSON object with a ``backups key``. This will be set to an array of backup
+ objects, each of which will contain the size of the backup and the timestamp at which it was
+ created.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
@@ -107923,9 +115127,27 @@ async def get_ca(self, database_cluster_uuid: str, **kwargs: Any) -> JSON:
# response body for status code(s): 200
response == {
- "ca": {
- "certificate": "str" # base64 encoding of the certificate used to
- secure database connections. Required.
+ "backups": [
+ {
+ "created_at": "2020-02-20 00:00:00", # A time value given in
+ ISO8601 combined date and time format at which the backup was created.
+ Required.
+ "size_gigabytes": 0.0, # The size of the database backup in
+ GBs. Required.
+ "incremental": bool # Optional. Indicates if this backup is
+ a full or an incremental one (available only for MySQL).
+ }
+ ],
+ "backup_progress": "str", # Optional. If a backup is currently in progress,
+ this attribute shows the percentage of completion. If no backup is in progress,
+ this attribute will be hidden.
+ "scheduled_backup_time": {
+ "backup_hour": 0, # Optional. The hour of the day when the backup is
+ scheduled (in UTC).
+ "backup_interval_hours": 0, # Optional. The frequency, in hours, at
+ which backups are taken.
+ "backup_minute": 0 # Optional. The minute of the hour when the
+ backup is scheduled.
}
}
# response body for status code(s): 404
@@ -107958,7 +115180,7 @@ async def get_ca(self, database_cluster_uuid: str, **kwargs: Any) -> JSON:
cls: ClsType[JSON] = kwargs.pop("cls", None)
- _request = build_databases_get_ca_request(
+ _request = build_databases_list_backups_request(
database_cluster_uuid=database_cluster_uuid,
headers=_headers,
params=_params,
@@ -108019,14 +115241,17 @@ async def get_ca(self, database_cluster_uuid: str, **kwargs: Any) -> JSON:
return cast(JSON, deserialized) # type: ignore
@distributed_trace_async
- async def get_migration_status(
- self, database_cluster_uuid: str, **kwargs: Any
- ) -> JSON:
+ async def list_replicas(self, database_cluster_uuid: str, **kwargs: Any) -> JSON:
# pylint: disable=line-too-long
- """Retrieve the Status of an Online Migration.
+ """List All Read-only Replicas.
- To retrieve the status of the most recent online migration, send a GET request to
- ``/v2/databases/$DATABASE_ID/online-migration``.
+ To list all of the read-only replicas associated with a database cluster, send a GET request to
+ ``/v2/databases/$DATABASE_ID/replicas``.
+
+ **Note**\\ : Read-only replicas are not supported for Caching or Valkey clusters.
+
+ The result will be a JSON object with a ``replicas`` key. This will be set to an array of
+ database replica objects, each of which will contain the standard database replica attributes.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
@@ -108039,11 +115264,91 @@ async def get_migration_status(
# response body for status code(s): 200
response == {
- "created_at": "str", # Optional. The time the migration was initiated, in
- ISO 8601 format.
- "id": "str", # Optional. The ID of the most recent migration.
- "status": "str" # Optional. The current status of the migration. Known
- values are: "running", "syncing", "canceled", "error", and "done".
+ "replicas": [
+ {
+ "name": "str", # The name to give the read-only replicating.
+ Required.
+ "connection": {
+ "database": "str", # Optional. The name of the
+ default database.
+ "host": "str", # Optional. The FQDN pointing to the
+ database cluster's current primary node.
+ "password": "str", # Optional. The randomly
+ generated password for the default
+ user.:code:`
`:code:`
`Requires ``database:view_credentials``
+ scope.
+ "port": 0, # Optional. The port on which the
+ database cluster is listening.
+ "ssl": bool, # Optional. A boolean value indicating
+ if the connection should be made over SSL.
+ "uri": "str", # Optional. A connection string in the
+ format accepted by the ``psql`` command. This is provided as a
+ convenience and should be able to be constructed by the other
+ attributes.
+ "user": "str" # Optional. The default user for the
+ database.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ },
+ "created_at": "2020-02-20 00:00:00", # Optional. A time
+ value given in ISO8601 combined date and time format that represents when
+ the database cluster was created.
+ "do_settings": {
+ "service_cnames": [
+ "str" # Optional. An array of custom CNAMEs
+ for the database cluster. Each CNAME must be a valid RFC 1123
+ hostname (e.g., "db.example.com"). Maximum of 16 CNAMEs allowed,
+ each up to 253 characters.
+ ]
+ },
+ "id": "str", # Optional. A unique ID that can be used to
+ identify and reference a database replica.
+ "private_connection": {
+ "database": "str", # Optional. The name of the
+ default database.
+ "host": "str", # Optional. The FQDN pointing to the
+ database cluster's current primary node.
+ "password": "str", # Optional. The randomly
+ generated password for the default
+ user.:code:`
`:code:`
`Requires ``database:view_credentials``
+ scope.
+ "port": 0, # Optional. The port on which the
+ database cluster is listening.
+ "ssl": bool, # Optional. A boolean value indicating
+ if the connection should be made over SSL.
+ "uri": "str", # Optional. A connection string in the
+ format accepted by the ``psql`` command. This is provided as a
+ convenience and should be able to be constructed by the other
+ attributes.
+ "user": "str" # Optional. The default user for the
+ database.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ },
+ "private_network_uuid": "str", # Optional. A string
+ specifying the UUID of the VPC to which the read-only replica will be
+ assigned. If excluded, the replica will be assigned to your account's
+ default VPC for the region. :code:`
`:code:`
`Requires ``vpc:read``
+ scope.
+ "region": "str", # Optional. A slug identifier for the
+ region where the read-only replica will be located. If excluded, the
+ replica will be placed in the same region as the cluster.
+ "size": "str", # Optional. A slug identifier representing
+ the size of the node for the read-only replica. The size of the replica
+ must be at least as large as the node size for the database cluster from
+ which it is replicating.
+ "status": "str", # Optional. A string representing the
+ current status of the database cluster. Known values are: "creating",
+ "online", "resizing", "migrating", and "forking".
+ "storage_size_mib": 0, # Optional. Additional storage added
+ to the cluster, in MiB. If null, no additional storage is added to the
+ cluster, beyond what is provided as a base amount from the 'size' and any
+ previously added additional storage.
+ "tags": [
+ "str" # Optional. A flat array of tag names as
+ strings applied to the read-only
+ replica.:code:`
`:code:`
`Requires ``tag:read`` scope.
+ ]
+ }
+ ]
}
# response body for status code(s): 404
response == {
@@ -108075,7 +115380,7 @@ async def get_migration_status(
cls: ClsType[JSON] = kwargs.pop("cls", None)
- _request = build_databases_get_migration_status_request(
+ _request = build_databases_list_replicas_request(
database_cluster_uuid=database_cluster_uuid,
headers=_headers,
params=_params,
@@ -108136,33 +115441,31 @@ async def get_migration_status(
return cast(JSON, deserialized) # type: ignore
@overload
- async def update_online_migration(
+ async def create_replica(
self,
database_cluster_uuid: str,
- body: JSON,
+ body: Optional[JSON] = None,
*,
content_type: str = "application/json",
**kwargs: Any
) -> JSON:
# pylint: disable=line-too-long
- """Start an Online Migration.
+ """Create a Read-only Replica.
- To start an online migration, send a PUT request to
- ``/v2/databases/$DATABASE_ID/online-migration`` endpoint. Migrating a cluster establishes a
- connection with an existing cluster and replicates its contents to the target cluster. Online
- migration is only available for MySQL, PostgreSQL, Caching, and Valkey clusters.
- If the existing database is continuously being written to, the migration process will continue
- for up to two weeks unless it is manually stopped. Online migration is only available for
- `MySQL
- `_\\
- , `PostgreSQL
- `_\\ , `Caching
- `_\\ , and `Valkey
- `_ clusters.
+ To create a read-only replica for a PostgreSQL or MySQL database cluster, send a POST request
+ to ``/v2/databases/$DATABASE_ID/replicas`` specifying the name it should be given, the size of
+ the node to be used, and the region where it will be located.
+
+ **Note**\\ : Read-only replicas are not supported for Caching or Valkey clusters.
+
+ The response will be a JSON object with a key called ``replica``. The value of this will be an
+ object that contains the standard attributes associated with a database replica. The initial
+ value of the read-only replica's ``status`` attribute will be ``forking``. When the replica is
+ ready to receive traffic, this will transition to ``active``.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
- :param body: Required.
+ :param body: Default value is None.
:type body: JSON
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
@@ -108176,31 +115479,160 @@ async def update_online_migration(
# JSON input template you can fill out and use as your body input.
body = {
- "source": {
- "dbname": "str", # Optional. The name of the default database.
+ "name": "str", # The name to give the read-only replicating. Required.
+ "connection": {
+ "database": "str", # Optional. The name of the default database.
"host": "str", # Optional. The FQDN pointing to the database
cluster's current primary node.
"password": "str", # Optional. The randomly generated password for
- the default user.
+ the default user.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
"port": 0, # Optional. The port on which the database cluster is
listening.
- "username": "str" # Optional. The default user for the database.
+ "ssl": bool, # Optional. A boolean value indicating if the
+ connection should be made over SSL.
+ "uri": "str", # Optional. A connection string in the format accepted
+ by the ``psql`` command. This is provided as a convenience and should be able
+ to be constructed by the other attributes.
+ "user": "str" # Optional. The default user for the
+ database.:code:`
`:code:`
`Requires ``database:view_credentials``
+ scope.
},
- "disable_ssl": bool, # Optional. Enables SSL encryption when connecting to
- the source database.
- "ignore_dbs": [
- "str" # Optional. List of databases that should be ignored during
- migration.
+ "created_at": "2020-02-20 00:00:00", # Optional. A time value given in
+ ISO8601 combined date and time format that represents when the database cluster
+ was created.
+ "do_settings": {
+ "service_cnames": [
+ "str" # Optional. An array of custom CNAMEs for the database
+ cluster. Each CNAME must be a valid RFC 1123 hostname (e.g.,
+ "db.example.com"). Maximum of 16 CNAMEs allowed, each up to 253
+ characters.
+ ]
+ },
+ "id": "str", # Optional. A unique ID that can be used to identify and
+ reference a database replica.
+ "private_connection": {
+ "database": "str", # Optional. The name of the default database.
+ "host": "str", # Optional. The FQDN pointing to the database
+ cluster's current primary node.
+ "password": "str", # Optional. The randomly generated password for
+ the default user.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ "port": 0, # Optional. The port on which the database cluster is
+ listening.
+ "ssl": bool, # Optional. A boolean value indicating if the
+ connection should be made over SSL.
+ "uri": "str", # Optional. A connection string in the format accepted
+ by the ``psql`` command. This is provided as a convenience and should be able
+ to be constructed by the other attributes.
+ "user": "str" # Optional. The default user for the
+ database.:code:`
`:code:`
`Requires ``database:view_credentials``
+ scope.
+ },
+ "private_network_uuid": "str", # Optional. A string specifying the UUID of
+ the VPC to which the read-only replica will be assigned. If excluded, the replica
+ will be assigned to your account's default VPC for the region.
+ :code:`
`:code:`
`Requires ``vpc:read`` scope.
+ "region": "str", # Optional. A slug identifier for the region where the
+ read-only replica will be located. If excluded, the replica will be placed in the
+ same region as the cluster.
+ "size": "str", # Optional. A slug identifier representing the size of the
+ node for the read-only replica. The size of the replica must be at least as large
+ as the node size for the database cluster from which it is replicating.
+ "status": "str", # Optional. A string representing the current status of the
+ database cluster. Known values are: "creating", "online", "resizing",
+ "migrating", and "forking".
+ "storage_size_mib": 0, # Optional. Additional storage added to the cluster,
+ in MiB. If null, no additional storage is added to the cluster, beyond what is
+ provided as a base amount from the 'size' and any previously added additional
+ storage.
+ "tags": [
+ "str" # Optional. A flat array of tag names as strings to apply to
+ the read-only replica after it is created. Tag names can either be existing
+ or new tags. :code:`
`:code:`
`Requires ``tag:create`` scope.
]
}
- # response body for status code(s): 200
+ # response body for status code(s): 201
response == {
- "created_at": "str", # Optional. The time the migration was initiated, in
- ISO 8601 format.
- "id": "str", # Optional. The ID of the most recent migration.
- "status": "str" # Optional. The current status of the migration. Known
- values are: "running", "syncing", "canceled", "error", and "done".
+ "replica": {
+ "name": "str", # The name to give the read-only replicating.
+ Required.
+ "connection": {
+ "database": "str", # Optional. The name of the default
+ database.
+ "host": "str", # Optional. The FQDN pointing to the database
+ cluster's current primary node.
+ "password": "str", # Optional. The randomly generated
+ password for the default user.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ "port": 0, # Optional. The port on which the database
+ cluster is listening.
+ "ssl": bool, # Optional. A boolean value indicating if the
+ connection should be made over SSL.
+ "uri": "str", # Optional. A connection string in the format
+ accepted by the ``psql`` command. This is provided as a convenience and
+ should be able to be constructed by the other attributes.
+ "user": "str" # Optional. The default user for the
+ database.:code:`
`:code:`
`Requires ``database:view_credentials``
+ scope.
+ },
+ "created_at": "2020-02-20 00:00:00", # Optional. A time value given
+ in ISO8601 combined date and time format that represents when the database
+ cluster was created.
+ "do_settings": {
+ "service_cnames": [
+ "str" # Optional. An array of custom CNAMEs for the
+ database cluster. Each CNAME must be a valid RFC 1123 hostname (e.g.,
+ "db.example.com"). Maximum of 16 CNAMEs allowed, each up to 253
+ characters.
+ ]
+ },
+ "id": "str", # Optional. A unique ID that can be used to identify
+ and reference a database replica.
+ "private_connection": {
+ "database": "str", # Optional. The name of the default
+ database.
+ "host": "str", # Optional. The FQDN pointing to the database
+ cluster's current primary node.
+ "password": "str", # Optional. The randomly generated
+ password for the default user.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ "port": 0, # Optional. The port on which the database
+ cluster is listening.
+ "ssl": bool, # Optional. A boolean value indicating if the
+ connection should be made over SSL.
+ "uri": "str", # Optional. A connection string in the format
+ accepted by the ``psql`` command. This is provided as a convenience and
+ should be able to be constructed by the other attributes.
+ "user": "str" # Optional. The default user for the
+ database.:code:`
`:code:`
`Requires ``database:view_credentials``
+ scope.
+ },
+ "private_network_uuid": "str", # Optional. A string specifying the
+ UUID of the VPC to which the read-only replica will be assigned. If excluded,
+ the replica will be assigned to your account's default VPC for the region.
+ :code:`
`:code:`
`Requires ``vpc:read`` scope.
+ "region": "str", # Optional. A slug identifier for the region where
+ the read-only replica will be located. If excluded, the replica will be
+ placed in the same region as the cluster.
+ "size": "str", # Optional. A slug identifier representing the size
+ of the node for the read-only replica. The size of the replica must be at
+ least as large as the node size for the database cluster from which it is
+ replicating.
+ "status": "str", # Optional. A string representing the current
+ status of the database cluster. Known values are: "creating", "online",
+ "resizing", "migrating", and "forking".
+ "storage_size_mib": 0, # Optional. Additional storage added to the
+ cluster, in MiB. If null, no additional storage is added to the cluster,
+ beyond what is provided as a base amount from the 'size' and any previously
+ added additional storage.
+ "tags": [
+ "str" # Optional. A flat array of tag names as strings
+ applied to the read-only replica.:code:`
`:code:`
`Requires
+ ``tag:read`` scope.
+ ]
+ }
}
# response body for status code(s): 404
response == {
@@ -108216,33 +115648,31 @@ async def update_online_migration(
"""
@overload
- async def update_online_migration(
+ async def create_replica(
self,
database_cluster_uuid: str,
- body: IO[bytes],
+ body: Optional[IO[bytes]] = None,
*,
content_type: str = "application/json",
**kwargs: Any
) -> JSON:
# pylint: disable=line-too-long
- """Start an Online Migration.
+ """Create a Read-only Replica.
- To start an online migration, send a PUT request to
- ``/v2/databases/$DATABASE_ID/online-migration`` endpoint. Migrating a cluster establishes a
- connection with an existing cluster and replicates its contents to the target cluster. Online
- migration is only available for MySQL, PostgreSQL, Caching, and Valkey clusters.
- If the existing database is continuously being written to, the migration process will continue
- for up to two weeks unless it is manually stopped. Online migration is only available for
- `MySQL
- `_\\
- , `PostgreSQL
- `_\\ , `Caching
- `_\\ , and `Valkey
- `_ clusters.
+ To create a read-only replica for a PostgreSQL or MySQL database cluster, send a POST request
+ to ``/v2/databases/$DATABASE_ID/replicas`` specifying the name it should be given, the size of
+ the node to be used, and the region where it will be located.
+
+ **Note**\\ : Read-only replicas are not supported for Caching or Valkey clusters.
+
+ The response will be a JSON object with a key called ``replica``. The value of this will be an
+ object that contains the standard attributes associated with a database replica. The initial
+ value of the read-only replica's ``status`` attribute will be ``forking``. When the replica is
+ ready to receive traffic, this will transition to ``active``.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
- :param body: Required.
+ :param body: Default value is None.
:type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
@@ -108254,13 +115684,86 @@ async def update_online_migration(
Example:
.. code-block:: python
- # response body for status code(s): 200
+ # response body for status code(s): 201
response == {
- "created_at": "str", # Optional. The time the migration was initiated, in
- ISO 8601 format.
- "id": "str", # Optional. The ID of the most recent migration.
- "status": "str" # Optional. The current status of the migration. Known
- values are: "running", "syncing", "canceled", "error", and "done".
+ "replica": {
+ "name": "str", # The name to give the read-only replicating.
+ Required.
+ "connection": {
+ "database": "str", # Optional. The name of the default
+ database.
+ "host": "str", # Optional. The FQDN pointing to the database
+ cluster's current primary node.
+ "password": "str", # Optional. The randomly generated
+ password for the default user.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ "port": 0, # Optional. The port on which the database
+ cluster is listening.
+ "ssl": bool, # Optional. A boolean value indicating if the
+ connection should be made over SSL.
+ "uri": "str", # Optional. A connection string in the format
+ accepted by the ``psql`` command. This is provided as a convenience and
+ should be able to be constructed by the other attributes.
+ "user": "str" # Optional. The default user for the
+ database.:code:`
`:code:`
`Requires ``database:view_credentials``
+ scope.
+ },
+ "created_at": "2020-02-20 00:00:00", # Optional. A time value given
+ in ISO8601 combined date and time format that represents when the database
+ cluster was created.
+ "do_settings": {
+ "service_cnames": [
+ "str" # Optional. An array of custom CNAMEs for the
+ database cluster. Each CNAME must be a valid RFC 1123 hostname (e.g.,
+ "db.example.com"). Maximum of 16 CNAMEs allowed, each up to 253
+ characters.
+ ]
+ },
+ "id": "str", # Optional. A unique ID that can be used to identify
+ and reference a database replica.
+ "private_connection": {
+ "database": "str", # Optional. The name of the default
+ database.
+ "host": "str", # Optional. The FQDN pointing to the database
+ cluster's current primary node.
+ "password": "str", # Optional. The randomly generated
+ password for the default user.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ "port": 0, # Optional. The port on which the database
+ cluster is listening.
+ "ssl": bool, # Optional. A boolean value indicating if the
+ connection should be made over SSL.
+ "uri": "str", # Optional. A connection string in the format
+ accepted by the ``psql`` command. This is provided as a convenience and
+ should be able to be constructed by the other attributes.
+ "user": "str" # Optional. The default user for the
+ database.:code:`
`:code:`
`Requires ``database:view_credentials``
+ scope.
+ },
+ "private_network_uuid": "str", # Optional. A string specifying the
+ UUID of the VPC to which the read-only replica will be assigned. If excluded,
+ the replica will be assigned to your account's default VPC for the region.
+ :code:`
`:code:`
`Requires ``vpc:read`` scope.
+ "region": "str", # Optional. A slug identifier for the region where
+ the read-only replica will be located. If excluded, the replica will be
+ placed in the same region as the cluster.
+ "size": "str", # Optional. A slug identifier representing the size
+ of the node for the read-only replica. The size of the replica must be at
+ least as large as the node size for the database cluster from which it is
+ replicating.
+ "status": "str", # Optional. A string representing the current
+ status of the database cluster. Known values are: "creating", "online",
+ "resizing", "migrating", and "forking".
+ "storage_size_mib": 0, # Optional. Additional storage added to the
+ cluster, in MiB. If null, no additional storage is added to the cluster,
+ beyond what is provided as a base amount from the 'size' and any previously
+ added additional storage.
+ "tags": [
+ "str" # Optional. A flat array of tag names as strings
+ applied to the read-only replica.:code:`
`:code:`
`Requires
+ ``tag:read`` scope.
+ ]
+ }
}
# response body for status code(s): 404
response == {
@@ -108276,28 +115779,29 @@ async def update_online_migration(
"""
@distributed_trace_async
- async def update_online_migration(
- self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any
+ async def create_replica(
+ self,
+ database_cluster_uuid: str,
+ body: Optional[Union[JSON, IO[bytes]]] = None,
+ **kwargs: Any
) -> JSON:
# pylint: disable=line-too-long
- """Start an Online Migration.
+ """Create a Read-only Replica.
- To start an online migration, send a PUT request to
- ``/v2/databases/$DATABASE_ID/online-migration`` endpoint. Migrating a cluster establishes a
- connection with an existing cluster and replicates its contents to the target cluster. Online
- migration is only available for MySQL, PostgreSQL, Caching, and Valkey clusters.
- If the existing database is continuously being written to, the migration process will continue
- for up to two weeks unless it is manually stopped. Online migration is only available for
- `MySQL
- `_\\
- , `PostgreSQL
- `_\\ , `Caching
- `_\\ , and `Valkey
- `_ clusters.
+ To create a read-only replica for a PostgreSQL or MySQL database cluster, send a POST request
+ to ``/v2/databases/$DATABASE_ID/replicas`` specifying the name it should be given, the size of
+ the node to be used, and the region where it will be located.
+
+ **Note**\\ : Read-only replicas are not supported for Caching or Valkey clusters.
+
+ The response will be a JSON object with a key called ``replica``. The value of this will be an
+ object that contains the standard attributes associated with a database replica. The initial
+ value of the read-only replica's ``status`` attribute will be ``forking``. When the replica is
+ ready to receive traffic, this will transition to ``active``.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
- :param body: Is either a JSON type or a IO[bytes] type. Required.
+ :param body: Is either a JSON type or a IO[bytes] type. Default value is None.
:type body: JSON or IO[bytes]
:return: JSON object
:rtype: JSON
@@ -108308,31 +115812,160 @@ async def update_online_migration(
# JSON input template you can fill out and use as your body input.
body = {
- "source": {
- "dbname": "str", # Optional. The name of the default database.
+ "name": "str", # The name to give the read-only replicating. Required.
+ "connection": {
+ "database": "str", # Optional. The name of the default database.
"host": "str", # Optional. The FQDN pointing to the database
cluster's current primary node.
"password": "str", # Optional. The randomly generated password for
- the default user.
+ the default user.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
"port": 0, # Optional. The port on which the database cluster is
listening.
- "username": "str" # Optional. The default user for the database.
+ "ssl": bool, # Optional. A boolean value indicating if the
+ connection should be made over SSL.
+ "uri": "str", # Optional. A connection string in the format accepted
+ by the ``psql`` command. This is provided as a convenience and should be able
+ to be constructed by the other attributes.
+ "user": "str" # Optional. The default user for the
+ database.:code:`
`:code:`
`Requires ``database:view_credentials``
+ scope.
},
- "disable_ssl": bool, # Optional. Enables SSL encryption when connecting to
- the source database.
- "ignore_dbs": [
- "str" # Optional. List of databases that should be ignored during
- migration.
+ "created_at": "2020-02-20 00:00:00", # Optional. A time value given in
+ ISO8601 combined date and time format that represents when the database cluster
+ was created.
+ "do_settings": {
+ "service_cnames": [
+ "str" # Optional. An array of custom CNAMEs for the database
+ cluster. Each CNAME must be a valid RFC 1123 hostname (e.g.,
+ "db.example.com"). Maximum of 16 CNAMEs allowed, each up to 253
+ characters.
+ ]
+ },
+ "id": "str", # Optional. A unique ID that can be used to identify and
+ reference a database replica.
+ "private_connection": {
+ "database": "str", # Optional. The name of the default database.
+ "host": "str", # Optional. The FQDN pointing to the database
+ cluster's current primary node.
+ "password": "str", # Optional. The randomly generated password for
+ the default user.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ "port": 0, # Optional. The port on which the database cluster is
+ listening.
+ "ssl": bool, # Optional. A boolean value indicating if the
+ connection should be made over SSL.
+ "uri": "str", # Optional. A connection string in the format accepted
+ by the ``psql`` command. This is provided as a convenience and should be able
+ to be constructed by the other attributes.
+ "user": "str" # Optional. The default user for the
+ database.:code:`
`:code:`
`Requires ``database:view_credentials``
+ scope.
+ },
+ "private_network_uuid": "str", # Optional. A string specifying the UUID of
+ the VPC to which the read-only replica will be assigned. If excluded, the replica
+ will be assigned to your account's default VPC for the region.
+ :code:`
`:code:`
`Requires ``vpc:read`` scope.
+ "region": "str", # Optional. A slug identifier for the region where the
+ read-only replica will be located. If excluded, the replica will be placed in the
+ same region as the cluster.
+ "size": "str", # Optional. A slug identifier representing the size of the
+ node for the read-only replica. The size of the replica must be at least as large
+ as the node size for the database cluster from which it is replicating.
+ "status": "str", # Optional. A string representing the current status of the
+ database cluster. Known values are: "creating", "online", "resizing",
+ "migrating", and "forking".
+ "storage_size_mib": 0, # Optional. Additional storage added to the cluster,
+ in MiB. If null, no additional storage is added to the cluster, beyond what is
+ provided as a base amount from the 'size' and any previously added additional
+ storage.
+ "tags": [
+ "str" # Optional. A flat array of tag names as strings to apply to
+ the read-only replica after it is created. Tag names can either be existing
+ or new tags. :code:`
`:code:`
`Requires ``tag:create`` scope.
]
}
- # response body for status code(s): 200
+ # response body for status code(s): 201
response == {
- "created_at": "str", # Optional. The time the migration was initiated, in
- ISO 8601 format.
- "id": "str", # Optional. The ID of the most recent migration.
- "status": "str" # Optional. The current status of the migration. Known
- values are: "running", "syncing", "canceled", "error", and "done".
+ "replica": {
+ "name": "str", # The name to give the read-only replicating.
+ Required.
+ "connection": {
+ "database": "str", # Optional. The name of the default
+ database.
+ "host": "str", # Optional. The FQDN pointing to the database
+ cluster's current primary node.
+ "password": "str", # Optional. The randomly generated
+ password for the default user.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ "port": 0, # Optional. The port on which the database
+ cluster is listening.
+ "ssl": bool, # Optional. A boolean value indicating if the
+ connection should be made over SSL.
+ "uri": "str", # Optional. A connection string in the format
+ accepted by the ``psql`` command. This is provided as a convenience and
+ should be able to be constructed by the other attributes.
+ "user": "str" # Optional. The default user for the
+ database.:code:`
`:code:`
`Requires ``database:view_credentials``
+ scope.
+ },
+ "created_at": "2020-02-20 00:00:00", # Optional. A time value given
+ in ISO8601 combined date and time format that represents when the database
+ cluster was created.
+ "do_settings": {
+ "service_cnames": [
+ "str" # Optional. An array of custom CNAMEs for the
+ database cluster. Each CNAME must be a valid RFC 1123 hostname (e.g.,
+ "db.example.com"). Maximum of 16 CNAMEs allowed, each up to 253
+ characters.
+ ]
+ },
+ "id": "str", # Optional. A unique ID that can be used to identify
+ and reference a database replica.
+ "private_connection": {
+ "database": "str", # Optional. The name of the default
+ database.
+ "host": "str", # Optional. The FQDN pointing to the database
+ cluster's current primary node.
+ "password": "str", # Optional. The randomly generated
+ password for the default user.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ "port": 0, # Optional. The port on which the database
+ cluster is listening.
+ "ssl": bool, # Optional. A boolean value indicating if the
+ connection should be made over SSL.
+ "uri": "str", # Optional. A connection string in the format
+ accepted by the ``psql`` command. This is provided as a convenience and
+ should be able to be constructed by the other attributes.
+ "user": "str" # Optional. The default user for the
+ database.:code:`
`:code:`
`Requires ``database:view_credentials``
+ scope.
+ },
+ "private_network_uuid": "str", # Optional. A string specifying the
+ UUID of the VPC to which the read-only replica will be assigned. If excluded,
+ the replica will be assigned to your account's default VPC for the region.
+ :code:`
`:code:`
`Requires ``vpc:read`` scope.
+ "region": "str", # Optional. A slug identifier for the region where
+ the read-only replica will be located. If excluded, the replica will be
+ placed in the same region as the cluster.
+ "size": "str", # Optional. A slug identifier representing the size
+ of the node for the read-only replica. The size of the replica must be at
+ least as large as the node size for the database cluster from which it is
+ replicating.
+ "status": "str", # Optional. A string representing the current
+ status of the database cluster. Known values are: "creating", "online",
+ "resizing", "migrating", and "forking".
+ "storage_size_mib": 0, # Optional. Additional storage added to the
+ cluster, in MiB. If null, no additional storage is added to the cluster,
+ beyond what is provided as a base amount from the 'size' and any previously
+ added additional storage.
+ "tags": [
+ "str" # Optional. A flat array of tag names as strings
+ applied to the read-only replica.:code:`
`:code:`
`Requires
+ ``tag:read`` scope.
+ ]
+ }
}
# response body for status code(s): 404
response == {
@@ -108373,9 +116006,12 @@ async def update_online_migration(
if isinstance(body, (IOBase, bytes)):
_content = body
else:
- _json = body
+ if body is not None:
+ _json = body
+ else:
+ _json = None
- _request = build_databases_update_online_migration_request(
+ _request = build_databases_create_replica_request(
database_cluster_uuid=database_cluster_uuid,
content_type=content_type,
json=_json,
@@ -108394,14 +116030,14 @@ async def update_online_migration(
response = pipeline_response.http_response
- if response.status_code not in [200, 404]:
+ if response.status_code not in [201, 404]:
if _stream:
await response.read() # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
raise HttpResponseError(response=response)
response_headers = {}
- if response.status_code == 200:
+ if response.status_code == 201:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
)
@@ -108439,29 +116075,39 @@ async def update_online_migration(
return cast(JSON, deserialized) # type: ignore
@distributed_trace_async
- async def delete_online_migration(
- self, database_cluster_uuid: str, migration_id: str, **kwargs: Any
- ) -> Optional[JSON]:
+ async def list_events_logs(self, database_cluster_uuid: str, **kwargs: Any) -> JSON:
# pylint: disable=line-too-long
- """Stop an Online Migration.
+ """List all Events Logs.
- To stop an online migration, send a DELETE request to
- ``/v2/databases/$DATABASE_ID/online-migration/$MIGRATION_ID``.
+ To list all of the cluster events, send a GET request to
+ ``/v2/databases/$DATABASE_ID/events``.
- A status of 204 will be given. This indicates that the request was processed successfully, but
- that no response body is needed.
+        The result will be a JSON object with an ``events`` key.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
- :param migration_id: A unique identifier assigned to the online migration. Required.
- :type migration_id: str
- :return: JSON object or None
- :rtype: JSON or None
+ :return: JSON object
+ :rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
+ # response body for status code(s): 200
+ response == {
+ "events": [
+ {
+ "cluster_name": "str", # Optional. The name of cluster.
+ "create_time": "str", # Optional. The time of the generation
+ of a event.
+ "event_type": "str", # Optional. Type of the event. Known
+ values are: "cluster_maintenance_perform", "cluster_master_promotion",
+ "cluster_create", "cluster_update", "cluster_delete", "cluster_poweron",
+ and "cluster_poweroff".
+ "id": "str" # Optional. ID of the particular event.
+ }
+ ]
+ }
# response body for status code(s): 404
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
@@ -108490,11 +116136,10 @@ async def delete_online_migration(
_headers = kwargs.pop("headers", {}) or {}
_params = kwargs.pop("params", {}) or {}
- cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None)
+ cls: ClsType[JSON] = kwargs.pop("cls", None)
- _request = build_databases_delete_online_migration_request(
+ _request = build_databases_list_events_logs_request(
database_cluster_uuid=database_cluster_uuid,
- migration_id=migration_id,
headers=_headers,
params=_params,
)
@@ -108509,15 +116154,14 @@ async def delete_online_migration(
response = pipeline_response.http_response
- if response.status_code not in [204, 404]:
+ if response.status_code not in [200, 404]:
if _stream:
await response.read() # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
raise HttpResponseError(response=response)
- deserialized = None
response_headers = {}
- if response.status_code == 204:
+ if response.status_code == 200:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
)
@@ -108528,6 +116172,11 @@ async def delete_online_migration(
"int", response.headers.get("ratelimit-reset")
)
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
if response.status_code == 404:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
@@ -108545,51 +116194,117 @@ async def delete_online_migration(
deserialized = None
if cls:
- return cls(pipeline_response, deserialized, response_headers) # type: ignore
+ return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
- return deserialized # type: ignore
+ return cast(JSON, deserialized) # type: ignore
- @overload
- async def update_region(
- self,
- database_cluster_uuid: str,
- body: JSON,
- *,
- content_type: str = "application/json",
- **kwargs: Any
- ) -> Optional[JSON]:
+ @distributed_trace_async
+ async def get_replica(
+ self, database_cluster_uuid: str, replica_name: str, **kwargs: Any
+ ) -> JSON:
# pylint: disable=line-too-long
- """Migrate a Database Cluster to a New Region.
+ """Retrieve an Existing Read-only Replica.
- To migrate a database cluster to a new region, send a ``PUT`` request to
- ``/v2/databases/$DATABASE_ID/migrate``. The body of the request must specify a
- ``region`` attribute.
+ To show information about an existing database replica, send a GET request to
+ ``/v2/databases/$DATABASE_ID/replicas/$REPLICA_NAME``.
- A successful request will receive a 202 Accepted status code with no body in
- response. Querying the database cluster will show that its ``status`` attribute
- will now be set to ``migrating``. This will transition back to ``online`` when the
- migration has completed.
+ **Note**\\ : Read-only replicas are not supported for Caching or Valkey clusters.
+
+ The response will be a JSON object with a ``replica key``. This will be set to an object
+ containing the standard database replica attributes.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
- :param body: Required.
- :type body: JSON
- :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
- Default value is "application/json".
- :paramtype content_type: str
- :return: JSON object or None
- :rtype: JSON or None
+ :param replica_name: The name of the database replica. Required.
+ :type replica_name: str
+ :return: JSON object
+ :rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
- # JSON input template you can fill out and use as your body input.
- body = {
- "region": "str" # A slug identifier for the region to which the database
- cluster will be migrated. Required.
+ # response body for status code(s): 200
+ response == {
+ "replica": {
+ "name": "str", # The name to give the read-only replicating.
+ Required.
+ "connection": {
+ "database": "str", # Optional. The name of the default
+ database.
+ "host": "str", # Optional. The FQDN pointing to the database
+ cluster's current primary node.
+ "password": "str", # Optional. The randomly generated
+ password for the default user.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ "port": 0, # Optional. The port on which the database
+ cluster is listening.
+ "ssl": bool, # Optional. A boolean value indicating if the
+ connection should be made over SSL.
+ "uri": "str", # Optional. A connection string in the format
+ accepted by the ``psql`` command. This is provided as a convenience and
+ should be able to be constructed by the other attributes.
+ "user": "str" # Optional. The default user for the
+ database.:code:`
`:code:`
`Requires ``database:view_credentials``
+ scope.
+ },
+ "created_at": "2020-02-20 00:00:00", # Optional. A time value given
+ in ISO8601 combined date and time format that represents when the database
+ cluster was created.
+ "do_settings": {
+ "service_cnames": [
+ "str" # Optional. An array of custom CNAMEs for the
+ database cluster. Each CNAME must be a valid RFC 1123 hostname (e.g.,
+ "db.example.com"). Maximum of 16 CNAMEs allowed, each up to 253
+ characters.
+ ]
+ },
+ "id": "str", # Optional. A unique ID that can be used to identify
+ and reference a database replica.
+ "private_connection": {
+ "database": "str", # Optional. The name of the default
+ database.
+ "host": "str", # Optional. The FQDN pointing to the database
+ cluster's current primary node.
+ "password": "str", # Optional. The randomly generated
+ password for the default user.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ "port": 0, # Optional. The port on which the database
+ cluster is listening.
+ "ssl": bool, # Optional. A boolean value indicating if the
+ connection should be made over SSL.
+ "uri": "str", # Optional. A connection string in the format
+ accepted by the ``psql`` command. This is provided as a convenience and
+ should be able to be constructed by the other attributes.
+ "user": "str" # Optional. The default user for the
+ database.:code:`
`:code:`
`Requires ``database:view_credentials``
+ scope.
+ },
+ "private_network_uuid": "str", # Optional. A string specifying the
+ UUID of the VPC to which the read-only replica will be assigned. If excluded,
+ the replica will be assigned to your account's default VPC for the region.
+ :code:`
`:code:`
`Requires ``vpc:read`` scope.
+ "region": "str", # Optional. A slug identifier for the region where
+ the read-only replica will be located. If excluded, the replica will be
+ placed in the same region as the cluster.
+ "size": "str", # Optional. A slug identifier representing the size
+ of the node for the read-only replica. The size of the replica must be at
+ least as large as the node size for the database cluster from which it is
+ replicating.
+ "status": "str", # Optional. A string representing the current
+ status of the database cluster. Known values are: "creating", "online",
+ "resizing", "migrating", and "forking".
+ "storage_size_mib": 0, # Optional. Additional storage added to the
+ cluster, in MiB. If null, no additional storage is added to the cluster,
+ beyond what is provided as a base amount from the 'size' and any previously
+ added additional storage.
+ "tags": [
+ "str" # Optional. A flat array of tag names as strings
+ applied to the read-only replica.:code:`
`:code:`
`Requires
+ ``tag:read`` scope.
+ ]
+ }
}
-
# response body for status code(s): 404
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
@@ -108602,75 +116317,104 @@ async def update_region(
tickets to help identify the issue.
}
"""
+ error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ 401: cast(
+ Type[HttpResponseError],
+ lambda response: ClientAuthenticationError(response=response),
+ ),
+ 429: HttpResponseError,
+ 500: HttpResponseError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
- @overload
- async def update_region(
- self,
- database_cluster_uuid: str,
- body: IO[bytes],
- *,
- content_type: str = "application/json",
- **kwargs: Any
- ) -> Optional[JSON]:
- # pylint: disable=line-too-long
- """Migrate a Database Cluster to a New Region.
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = kwargs.pop("params", {}) or {}
- To migrate a database cluster to a new region, send a ``PUT`` request to
- ``/v2/databases/$DATABASE_ID/migrate``. The body of the request must specify a
- ``region`` attribute.
+ cls: ClsType[JSON] = kwargs.pop("cls", None)
- A successful request will receive a 202 Accepted status code with no body in
- response. Querying the database cluster will show that its ``status`` attribute
- will now be set to ``migrating``. This will transition back to ``online`` when the
- migration has completed.
+ _request = build_databases_get_replica_request(
+ database_cluster_uuid=database_cluster_uuid,
+ replica_name=replica_name,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
- :param database_cluster_uuid: A unique identifier for a database cluster. Required.
- :type database_cluster_uuid: str
- :param body: Required.
- :type body: IO[bytes]
- :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
- Default value is "application/json".
- :paramtype content_type: str
- :return: JSON object or None
- :rtype: JSON or None
- :raises ~azure.core.exceptions.HttpResponseError:
+ _stream = False
+ pipeline_response: PipelineResponse = (
+ await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ )
- Example:
- .. code-block:: python
+ response = pipeline_response.http_response
- # response body for status code(s): 404
- response == {
- "id": "str", # A short identifier corresponding to the HTTP status code
- returned. For example, the ID for a response returning a 404 status code would
- be "not_found.". Required.
- "message": "str", # A message providing additional information about the
- error, including details to help resolve it when possible. Required.
- "request_id": "str" # Optional. Optionally, some endpoints may include a
- request ID that should be provided when reporting bugs or opening support
- tickets to help identify the issue.
- }
- """
+ if response.status_code not in [200, 404]:
+ if _stream:
+ await response.read() # Load the body in memory and close the socket
+ map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
+ raise HttpResponseError(response=response)
+
+ response_headers = {}
+ if response.status_code == 200:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if response.status_code == 404:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if cls:
+ return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
+
+ return cast(JSON, deserialized) # type: ignore
@distributed_trace_async
- async def update_region(
- self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any
+ async def destroy_replica(
+ self, database_cluster_uuid: str, replica_name: str, **kwargs: Any
) -> Optional[JSON]:
# pylint: disable=line-too-long
- """Migrate a Database Cluster to a New Region.
+ """Destroy a Read-only Replica.
- To migrate a database cluster to a new region, send a ``PUT`` request to
- ``/v2/databases/$DATABASE_ID/migrate``. The body of the request must specify a
- ``region`` attribute.
+ To destroy a specific read-only replica, send a DELETE request to
+ ``/v2/databases/$DATABASE_ID/replicas/$REPLICA_NAME``.
- A successful request will receive a 202 Accepted status code with no body in
- response. Querying the database cluster will show that its ``status`` attribute
- will now be set to ``migrating``. This will transition back to ``online`` when the
- migration has completed.
+ **Note**\\ : Read-only replicas are not supported for Caching or Valkey clusters.
+
+ A status of 204 will be given. This indicates that the request was processed successfully, but
+ that no response body is needed.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
- :param body: Is either a JSON type or a IO[bytes] type. Required.
- :type body: JSON or IO[bytes]
+ :param replica_name: The name of the database replica. Required.
+ :type replica_name: str
:return: JSON object or None
:rtype: JSON or None
:raises ~azure.core.exceptions.HttpResponseError:
@@ -108678,12 +116422,6 @@ async def update_region(
Example:
.. code-block:: python
- # JSON input template you can fill out and use as your body input.
- body = {
- "region": "str" # A slug identifier for the region to which the database
- cluster will be migrated. Required.
- }
-
# response body for status code(s): 404
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
@@ -108709,27 +116447,14 @@ async def update_region(
}
error_map.update(kwargs.pop("error_map", {}) or {})
- _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _headers = kwargs.pop("headers", {}) or {}
_params = kwargs.pop("params", {}) or {}
- content_type: Optional[str] = kwargs.pop(
- "content_type", _headers.pop("Content-Type", None)
- )
cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None)
- content_type = content_type or "application/json"
- _json = None
- _content = None
- if isinstance(body, (IOBase, bytes)):
- _content = body
- else:
- _json = body
-
- _request = build_databases_update_region_request(
+ _request = build_databases_destroy_replica_request(
database_cluster_uuid=database_cluster_uuid,
- content_type=content_type,
- json=_json,
- content=_content,
+ replica_name=replica_name,
headers=_headers,
params=_params,
)
@@ -108744,7 +116469,7 @@ async def update_region(
response = pipeline_response.http_response
- if response.status_code not in [202, 404]:
+ if response.status_code not in [204, 404]:
if _stream:
await response.read() # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
@@ -108752,7 +116477,7 @@ async def update_region(
deserialized = None
response_headers = {}
- if response.status_code == 202:
+ if response.status_code == 204:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
)
@@ -108784,126 +116509,25 @@ async def update_region(
return deserialized # type: ignore
- @overload
- async def update_cluster_size(
- self,
- database_cluster_uuid: str,
- body: JSON,
- *,
- content_type: str = "application/json",
- **kwargs: Any
- ) -> Optional[JSON]:
- # pylint: disable=line-too-long
- """Resize a Database Cluster.
-
- To resize a database cluster, send a PUT request to ``/v2/databases/$DATABASE_ID/resize``. The
- body of the request must specify both the size and num_nodes attributes.
- A successful request will receive a 202 Accepted status code with no body in response. Querying
- the database cluster will show that its status attribute will now be set to resizing. This will
- transition back to online when the resize operation has completed.
-
- :param database_cluster_uuid: A unique identifier for a database cluster. Required.
- :type database_cluster_uuid: str
- :param body: Required.
- :type body: JSON
- :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
- Default value is "application/json".
- :paramtype content_type: str
- :return: JSON object or None
- :rtype: JSON or None
- :raises ~azure.core.exceptions.HttpResponseError:
-
- Example:
- .. code-block:: python
-
- # JSON input template you can fill out and use as your body input.
- body = {
- "num_nodes": 0, # The number of nodes in the database cluster. Valid values
- are are 1-3. In addition to the primary node, up to two standby nodes may be
- added for highly available configurations. Required.
- "size": "str", # A slug identifier representing desired the size of the
- nodes in the database cluster. Required.
- "storage_size_mib": 0 # Optional. Additional storage added to the cluster,
- in MiB. If null, no additional storage is added to the cluster, beyond what is
- provided as a base amount from the 'size' and any previously added additional
- storage.
- }
-
- # response body for status code(s): 404
- response == {
- "id": "str", # A short identifier corresponding to the HTTP status code
- returned. For example, the ID for a response returning a 404 status code would
- be "not_found.". Required.
- "message": "str", # A message providing additional information about the
- error, including details to help resolve it when possible. Required.
- "request_id": "str" # Optional. Optionally, some endpoints may include a
- request ID that should be provided when reporting bugs or opening support
- tickets to help identify the issue.
- }
- """
-
- @overload
- async def update_cluster_size(
- self,
- database_cluster_uuid: str,
- body: IO[bytes],
- *,
- content_type: str = "application/json",
- **kwargs: Any
+ @distributed_trace_async
+ async def promote_replica(
+ self, database_cluster_uuid: str, replica_name: str, **kwargs: Any
) -> Optional[JSON]:
# pylint: disable=line-too-long
- """Resize a Database Cluster.
-
- To resize a database cluster, send a PUT request to ``/v2/databases/$DATABASE_ID/resize``. The
- body of the request must specify both the size and num_nodes attributes.
- A successful request will receive a 202 Accepted status code with no body in response. Querying
- the database cluster will show that its status attribute will now be set to resizing. This will
- transition back to online when the resize operation has completed.
-
- :param database_cluster_uuid: A unique identifier for a database cluster. Required.
- :type database_cluster_uuid: str
- :param body: Required.
- :type body: IO[bytes]
- :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
- Default value is "application/json".
- :paramtype content_type: str
- :return: JSON object or None
- :rtype: JSON or None
- :raises ~azure.core.exceptions.HttpResponseError:
-
- Example:
- .. code-block:: python
+ """Promote a Read-only Replica to become a Primary Cluster.
- # response body for status code(s): 404
- response == {
- "id": "str", # A short identifier corresponding to the HTTP status code
- returned. For example, the ID for a response returning a 404 status code would
- be "not_found.". Required.
- "message": "str", # A message providing additional information about the
- error, including details to help resolve it when possible. Required.
- "request_id": "str" # Optional. Optionally, some endpoints may include a
- request ID that should be provided when reporting bugs or opening support
- tickets to help identify the issue.
- }
- """
+ To promote a specific read-only replica, send a PUT request to
+ ``/v2/databases/$DATABASE_ID/replicas/$REPLICA_NAME/promote``.
- @distributed_trace_async
- async def update_cluster_size(
- self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any
- ) -> Optional[JSON]:
- # pylint: disable=line-too-long
- """Resize a Database Cluster.
+ **Note**\\ : Read-only replicas are not supported for Caching or Valkey clusters.
- To resize a database cluster, send a PUT request to ``/v2/databases/$DATABASE_ID/resize``. The
- body of the request must specify both the size and num_nodes attributes.
- A successful request will receive a 202 Accepted status code with no body in response. Querying
- the database cluster will show that its status attribute will now be set to resizing. This will
- transition back to online when the resize operation has completed.
+ A status of 204 will be given. This indicates that the request was processed successfully, but
+ that no response body is needed.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
- :param body: Is either a JSON type or a IO[bytes] type. Required.
- :type body: JSON or IO[bytes]
+ :param replica_name: The name of the database replica. Required.
+ :type replica_name: str
:return: JSON object or None
:rtype: JSON or None
:raises ~azure.core.exceptions.HttpResponseError:
@@ -108911,19 +116535,6 @@ async def update_cluster_size(
Example:
.. code-block:: python
- # JSON input template you can fill out and use as your body input.
- body = {
- "num_nodes": 0, # The number of nodes in the database cluster. Valid values
- are are 1-3. In addition to the primary node, up to two standby nodes may be
- added for highly available configurations. Required.
- "size": "str", # A slug identifier representing desired the size of the
- nodes in the database cluster. Required.
- "storage_size_mib": 0 # Optional. Additional storage added to the cluster,
- in MiB. If null, no additional storage is added to the cluster, beyond what is
- provided as a base amount from the 'size' and any previously added additional
- storage.
- }
-
# response body for status code(s): 404
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
@@ -108949,27 +116560,14 @@ async def update_cluster_size(
}
error_map.update(kwargs.pop("error_map", {}) or {})
- _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _headers = kwargs.pop("headers", {}) or {}
_params = kwargs.pop("params", {}) or {}
- content_type: Optional[str] = kwargs.pop(
- "content_type", _headers.pop("Content-Type", None)
- )
cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None)
- content_type = content_type or "application/json"
- _json = None
- _content = None
- if isinstance(body, (IOBase, bytes)):
- _content = body
- else:
- _json = body
-
- _request = build_databases_update_cluster_size_request(
+ _request = build_databases_promote_replica_request(
database_cluster_uuid=database_cluster_uuid,
- content_type=content_type,
- json=_json,
- content=_content,
+ replica_name=replica_name,
headers=_headers,
params=_params,
)
@@ -108984,7 +116582,7 @@ async def update_cluster_size(
response = pipeline_response.http_response
- if response.status_code not in [202, 404]:
+ if response.status_code not in [204, 404]:
if _stream:
await response.read() # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
@@ -108992,7 +116590,7 @@ async def update_cluster_size(
deserialized = None
response_headers = {}
- if response.status_code == 202:
+ if response.status_code == 204:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
)
@@ -109025,15 +116623,23 @@ async def update_cluster_size(
return deserialized # type: ignore
@distributed_trace_async
- async def list_firewall_rules(
- self, database_cluster_uuid: str, **kwargs: Any
- ) -> JSON:
+ async def list_users(self, database_cluster_uuid: str, **kwargs: Any) -> JSON:
# pylint: disable=line-too-long
- """List Firewall Rules (Trusted Sources) for a Database Cluster.
+ """List all Database Users.
- To list all of a database cluster's firewall rules (known as "trusted sources" in the control
- panel), send a GET request to ``/v2/databases/$DATABASE_ID/firewall``.
- The result will be a JSON object with a ``rules`` key.
+ To list all of the users for your database cluster, send a GET request to
+ ``/v2/databases/$DATABASE_ID/users``.
+
+ Note: User management is not supported for Caching or Valkey clusters.
+
+ The result will be a JSON object with a ``users`` key. This will be set to an array
+ of database user objects, each of which will contain the standard database user attributes.
+ User passwords will not show without the ``database:view_credentials`` scope.
+
+ For MySQL clusters, additional options will be contained in the mysql_settings object.
+
+ For MongoDB clusters, additional information will be contained in the mongo_user_settings
+ object.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
@@ -109046,23 +116652,86 @@ async def list_firewall_rules(
# response body for status code(s): 200
response == {
- "rules": [
+ "users": [
{
- "type": "str", # The type of resource that the firewall rule
- allows to access the database cluster. Required. Known values are:
- "droplet", "k8s", "ip_addr", "tag", and "app".
- "value": "str", # The ID of the specific resource, the name
- of a tag applied to a group of resources, or the IP address that the
- firewall rule allows to access the database cluster. Required.
- "cluster_uuid": "str", # Optional. A unique ID for the
- database cluster to which the rule is applied.
- "created_at": "2020-02-20 00:00:00", # Optional. A time
- value given in ISO8601 combined date and time format that represents when
- the firewall rule was created.
- "description": "str", # Optional. A human-readable
- description of the rule.
- "uuid": "str" # Optional. A unique ID for the firewall rule
- itself.
+ "name": "str", # The name of a database user. Required.
+ "access_cert": "str", # Optional. Access certificate for TLS
+ client authentication. (Kafka only).
+ "access_key": "str", # Optional. Access key for TLS client
+ authentication. (Kafka only).
+ "mysql_settings": {
+ "auth_plugin": "str" # A string specifying the
+ authentication method to be used for connections to the MySQL user
+ account. The valid values are ``mysql_native_password`` or
+ ``caching_sha2_password``. If excluded when creating a new user, the
+ default for the version of MySQL in use will be used. As of MySQL
+ 8.0, the default is ``caching_sha2_password``. Required. Known values
+ are: "mysql_native_password" and "caching_sha2_password".
+ },
+ "password": "str", # Optional. A randomly generated password
+ for the database user.:code:`
`Requires ``database:view_credentials``
+ scope.
+ "role": "str", # Optional. A string representing the
+ database user's role. The value will be either "primary" or "normal".
+ Known values are: "primary" and "normal".
+ "settings": {
+ "acl": [
+ {
+ "permission": "str", # Permission
+ set applied to the ACL. 'consume' allows for messages to be
+ consumed from the topic. 'produce' allows for messages to be
+ published to the topic. 'produceconsume' allows for both
+ 'consume' and 'produce' permission. 'admin' allows for
+ 'produceconsume' as well as any operations to administer the
+ topic (delete, update). Required. Known values are: "admin",
+ "consume", "produce", and "produceconsume".
+ "topic": "str", # A regex for
+ matching the topic(s) that this ACL should apply to.
+ Required.
+ "id": "str" # Optional. An
+ identifier for the ACL. Will be computed after the ACL is
+ created/updated.
+ }
+ ],
+ "mongo_user_settings": {
+ "databases": [
+ "str" # Optional. A list of
+ databases to which the user should have access. When the
+ database is set to ``admin``"" , the user will have access to
+ all databases based on the user's role i.e. a user with the
+ role ``readOnly`` assigned to the ``admin`` database will
+ have read access to all databases.
+ ],
+ "role": "str" # Optional. The role to assign
+ to the user with each role mapping to a MongoDB built-in role.
+ ``readOnly`` maps to a `read
+ `_
+ role. ``readWrite`` maps to a `readWrite
+ `_
+ role. ``dbAdmin`` maps to a `dbAdmin
+ `_
+ role. Known values are: "readOnly", "readWrite", and "dbAdmin".
+ },
+ "opensearch_acl": [
+ {
+ "index": "str", # Optional. A regex
+ for matching the indexes that this ACL should apply to.
+ "permission": "str" # Optional.
+ Permission set applied to the ACL. 'read' allows user to read
+ from the index. 'write' allows for user to write to the
+ index. 'readwrite' allows for both 'read' and 'write'
+ permission. 'deny'(default) restricts user from performing
+ any operation over an index. 'admin' allows for 'readwrite'
+ as well as any operations to administer the index. Known
+ values are: "deny", "admin", "read", "readwrite", and
+ "write".
+ }
+ ],
+ "pg_allow_replication": bool # Optional. For
+ Postgres clusters, set to ``true`` for a user with replication
+ rights. This option is not currently supported for other database
+ engines.
+ }
}
]
}
@@ -109096,7 +116765,7 @@ async def list_firewall_rules(
cls: ClsType[JSON] = kwargs.pop("cls", None)
- _request = build_databases_list_firewall_rules_request(
+ _request = build_databases_list_users_request(
database_cluster_uuid=database_cluster_uuid,
headers=_headers,
params=_params,
@@ -109157,26 +116826,34 @@ async def list_firewall_rules(
return cast(JSON, deserialized) # type: ignore
@overload
- async def update_firewall_rules(
+ async def add_user(
self,
database_cluster_uuid: str,
body: JSON,
*,
content_type: str = "application/json",
**kwargs: Any
- ) -> Optional[JSON]:
+ ) -> JSON:
# pylint: disable=line-too-long
- """Update Firewall Rules (Trusted Sources) for a Database.
+ """Add a Database User.
- To update a database cluster's firewall rules (known as "trusted sources" in the control
- panel), send a PUT request to ``/v2/databases/$DATABASE_ID/firewall`` specifying which
- resources should be able to open connections to the database. You may limit connections to
- specific Droplets, Kubernetes clusters, or IP addresses. When a tag is provided, any Droplet or
- Kubernetes node with that tag applied to it will have access. The firewall is limited to 100
- rules (or trusted sources). When possible, we recommend `placing your databases into a VPC
- network `_ to limit access to them
- instead of using a firewall.
- A successful.
+ To add a new database user, send a POST request to ``/v2/databases/$DATABASE_ID/users``
+ with the desired username.
+
+ Note: User management is not supported for Caching or Valkey clusters.
+
+ When adding a user to a MySQL cluster, additional options can be configured in the
+ ``mysql_settings`` object.
+
+ When adding a user to a Kafka cluster, additional options can be configured in
+ the ``settings`` object.
+
+ When adding a user to a MongoDB cluster, additional options can be configured in
+ the ``settings.mongo_user_settings`` object.
+
+ The response will be a JSON object with a key called ``user``. The value of this will be an
+ object that contains the standard attributes associated with a database user including
+ its randomly generated password.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
@@ -109185,8 +116862,8 @@ async def update_firewall_rules(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :return: JSON object or None
- :rtype: JSON or None
+ :return: JSON object
+ :rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
Example:
@@ -109194,27 +116871,158 @@ async def update_firewall_rules(
# JSON input template you can fill out and use as your body input.
body = {
- "rules": [
- {
- "type": "str", # The type of resource that the firewall rule
- allows to access the database cluster. Required. Known values are:
- "droplet", "k8s", "ip_addr", "tag", and "app".
- "value": "str", # The ID of the specific resource, the name
- of a tag applied to a group of resources, or the IP address that the
- firewall rule allows to access the database cluster. Required.
- "cluster_uuid": "str", # Optional. A unique ID for the
- database cluster to which the rule is applied.
- "created_at": "2020-02-20 00:00:00", # Optional. A time
- value given in ISO8601 combined date and time format that represents when
- the firewall rule was created.
- "description": "str", # Optional. A human-readable
- description of the rule.
- "uuid": "str" # Optional. A unique ID for the firewall rule
- itself.
- }
- ]
+ "name": "str", # The name of a database user. Required.
+ "access_cert": "str", # Optional. Access certificate for TLS client
+ authentication. (Kafka only).
+ "access_key": "str", # Optional. Access key for TLS client authentication.
+ (Kafka only).
+ "mysql_settings": {
+ "auth_plugin": "str" # A string specifying the authentication method
+ to be used for connections to the MySQL user account. The valid values are
+ ``mysql_native_password`` or ``caching_sha2_password``. If excluded when
+ creating a new user, the default for the version of MySQL in use will be
+ used. As of MySQL 8.0, the default is ``caching_sha2_password``. Required.
+ Known values are: "mysql_native_password" and "caching_sha2_password".
+ },
+ "password": "str", # Optional. A randomly generated password for the
+ database user.:code:`<br>`Requires ``database:view_credentials`` scope.
+ "readonly": bool, # Optional. (To be deprecated: use
+ settings.mongo_user_settings.role instead for access controls to MongoDB
+ databases). For MongoDB clusters, set to ``true`` to create a read-only user.
+ This option is not currently supported for other database engines.
+ "role": "str", # Optional. A string representing the database user's role.
+ The value will be either "primary" or "normal". Known values are: "primary" and
+ "normal".
+ "settings": {
+ "acl": [
+ {
+ "permission": "str", # Permission set applied to the
+ ACL. 'consume' allows for messages to be consumed from the topic.
+ 'produce' allows for messages to be published to the topic.
+ 'produceconsume' allows for both 'consume' and 'produce' permission.
+ 'admin' allows for 'produceconsume' as well as any operations to
+ administer the topic (delete, update). Required. Known values are:
+ "admin", "consume", "produce", and "produceconsume".
+ "topic": "str", # A regex for matching the topic(s)
+ that this ACL should apply to. Required.
+ "id": "str" # Optional. An identifier for the ACL.
+ Will be computed after the ACL is created/updated.
+ }
+ ],
+ "mongo_user_settings": {
+ "databases": [
+ "str" # Optional. A list of databases to which the
+ user should have access. When the database is set to ``admin``,
+ the user will have access to all databases based on the user's role
+ i.e. a user with the role ``readOnly`` assigned to the ``admin``
+ database will have read access to all databases.
+ ],
+ "role": "str" # Optional. The role to assign to the user
+ with each role mapping to a MongoDB built-in role. ``readOnly`` maps to
+ a `read
+ <https://www.mongodb.com/docs/manual/reference/built-in-roles/#mongodb-authrole-read>`_
+ role. ``readWrite`` maps to a `readWrite
+ <https://www.mongodb.com/docs/manual/reference/built-in-roles/#mongodb-authrole-readWrite>`_
+ role. ``dbAdmin`` maps to a `dbAdmin
+ <https://www.mongodb.com/docs/manual/reference/built-in-roles/#mongodb-authrole-dbAdmin>`_
+ role. Known values are: "readOnly", "readWrite", and "dbAdmin".
+ },
+ "opensearch_acl": [
+ {
+ "index": "str", # Optional. A regex for matching the
+ indexes that this ACL should apply to.
+ "permission": "str" # Optional. Permission set
+ applied to the ACL. 'read' allows user to read from the index.
+ 'write' allows for user to write to the index. 'readwrite' allows for
+ both 'read' and 'write' permission. 'deny'(default) restricts user
+ from performing any operation over an index. 'admin' allows for
+ 'readwrite' as well as any operations to administer the index. Known
+ values are: "deny", "admin", "read", "readwrite", and "write".
+ }
+ ],
+ "pg_allow_replication": bool # Optional. For Postgres clusters, set
+ to ``true`` for a user with replication rights. This option is not currently
+ supported for other database engines.
+ }
}
+ # response body for status code(s): 201
+ response == {
+ "user": {
+ "name": "str", # The name of a database user. Required.
+ "access_cert": "str", # Optional. Access certificate for TLS client
+ authentication. (Kafka only).
+ "access_key": "str", # Optional. Access key for TLS client
+ authentication. (Kafka only).
+ "mysql_settings": {
+ "auth_plugin": "str" # A string specifying the
+ authentication method to be used for connections to the MySQL user
+ account. The valid values are ``mysql_native_password`` or
+ ``caching_sha2_password``. If excluded when creating a new user, the
+ default for the version of MySQL in use will be used. As of MySQL 8.0,
+ the default is ``caching_sha2_password``. Required. Known values are:
+ "mysql_native_password" and "caching_sha2_password".
+ },
+ "password": "str", # Optional. A randomly generated password for the
+ database user.:code:`<br>`Requires ``database:view_credentials`` scope.
+ "role": "str", # Optional. A string representing the database user's
+ role. The value will be either "primary" or "normal". Known values are:
+ "primary" and "normal".
+ "settings": {
+ "acl": [
+ {
+ "permission": "str", # Permission set
+ applied to the ACL. 'consume' allows for messages to be consumed
+ from the topic. 'produce' allows for messages to be published to
+ the topic. 'produceconsume' allows for both 'consume' and
+ 'produce' permission. 'admin' allows for 'produceconsume' as well
+ as any operations to administer the topic (delete, update).
+ Required. Known values are: "admin", "consume", "produce", and
+ "produceconsume".
+ "topic": "str", # A regex for matching the
+ topic(s) that this ACL should apply to. Required.
+ "id": "str" # Optional. An identifier for
+ the ACL. Will be computed after the ACL is created/updated.
+ }
+ ],
+ "mongo_user_settings": {
+ "databases": [
+ "str" # Optional. A list of databases to
+ which the user should have access. When the database is set to
+ ``admin``, the user will have access to all databases based on
+ the user's role i.e. a user with the role ``readOnly`` assigned
+ to the ``admin`` database will have read access to all databases.
+ ],
+ "role": "str" # Optional. The role to assign to the
+ user with each role mapping to a MongoDB built-in role. ``readOnly``
+ maps to a `read
+ <https://www.mongodb.com/docs/manual/reference/built-in-roles/#mongodb-authrole-read>`_
+ role. ``readWrite`` maps to a `readWrite
+ <https://www.mongodb.com/docs/manual/reference/built-in-roles/#mongodb-authrole-readWrite>`_
+ role. ``dbAdmin`` maps to a `dbAdmin
+ <https://www.mongodb.com/docs/manual/reference/built-in-roles/#mongodb-authrole-dbAdmin>`_
+ role. Known values are: "readOnly", "readWrite", and "dbAdmin".
+ },
+ "opensearch_acl": [
+ {
+ "index": "str", # Optional. A regex for
+ matching the indexes that this ACL should apply to.
+ "permission": "str" # Optional. Permission
+ set applied to the ACL. 'read' allows user to read from the
+ index. 'write' allows for user to write to the index. 'readwrite'
+ allows for both 'read' and 'write' permission. 'deny'(default)
+ restricts user from performing any operation over an index.
+ 'admin' allows for 'readwrite' as well as any operations to
+ administer the index. Known values are: "deny", "admin", "read",
+ "readwrite", and "write".
+ }
+ ],
+ "pg_allow_replication": bool # Optional. For Postgres
+ clusters, set to ``true`` for a user with replication rights. This option
+ is not currently supported for other database engines.
+ }
+ }
+ }
# response body for status code(s): 404
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
@@ -109229,26 +117037,34 @@ async def update_firewall_rules(
"""
@overload
- async def update_firewall_rules(
+ async def add_user(
self,
database_cluster_uuid: str,
body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
- ) -> Optional[JSON]:
+ ) -> JSON:
# pylint: disable=line-too-long
- """Update Firewall Rules (Trusted Sources) for a Database.
+ """Add a Database User.
- To update a database cluster's firewall rules (known as "trusted sources" in the control
- panel), send a PUT request to ``/v2/databases/$DATABASE_ID/firewall`` specifying which
- resources should be able to open connections to the database. You may limit connections to
- specific Droplets, Kubernetes clusters, or IP addresses. When a tag is provided, any Droplet or
- Kubernetes node with that tag applied to it will have access. The firewall is limited to 100
- rules (or trusted sources). When possible, we recommend `placing your databases into a VPC
- network `_ to limit access to them
- instead of using a firewall.
- A successful.
+ To add a new database user, send a POST request to ``/v2/databases/$DATABASE_ID/users``
+ with the desired username.
+
+ Note: User management is not supported for Caching or Valkey clusters.
+
+ When adding a user to a MySQL cluster, additional options can be configured in the
+ ``mysql_settings`` object.
+
+ When adding a user to a Kafka cluster, additional options can be configured in
+ the ``settings`` object.
+
+ When adding a user to a MongoDB cluster, additional options can be configured in
+ the ``settings.mongo_user_settings`` object.
+
+ The response will be a JSON object with a key called ``user``. The value of this will be an
+ object that contains the standard attributes associated with a database user including
+ its randomly generated password.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
@@ -109257,13 +117073,90 @@ async def update_firewall_rules(
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :return: JSON object or None
- :rtype: JSON or None
+ :return: JSON object
+ :rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
+ # response body for status code(s): 201
+ response == {
+ "user": {
+ "name": "str", # The name of a database user. Required.
+ "access_cert": "str", # Optional. Access certificate for TLS client
+ authentication. (Kafka only).
+ "access_key": "str", # Optional. Access key for TLS client
+ authentication. (Kafka only).
+ "mysql_settings": {
+ "auth_plugin": "str" # A string specifying the
+ authentication method to be used for connections to the MySQL user
+ account. The valid values are ``mysql_native_password`` or
+ ``caching_sha2_password``. If excluded when creating a new user, the
+ default for the version of MySQL in use will be used. As of MySQL 8.0,
+ the default is ``caching_sha2_password``. Required. Known values are:
+ "mysql_native_password" and "caching_sha2_password".
+ },
+ "password": "str", # Optional. A randomly generated password for the
+ database user.:code:`<br>`Requires ``database:view_credentials`` scope.
+ "role": "str", # Optional. A string representing the database user's
+ role. The value will be either "primary" or "normal". Known values are:
+ "primary" and "normal".
+ "settings": {
+ "acl": [
+ {
+ "permission": "str", # Permission set
+ applied to the ACL. 'consume' allows for messages to be consumed
+ from the topic. 'produce' allows for messages to be published to
+ the topic. 'produceconsume' allows for both 'consume' and
+ 'produce' permission. 'admin' allows for 'produceconsume' as well
+ as any operations to administer the topic (delete, update).
+ Required. Known values are: "admin", "consume", "produce", and
+ "produceconsume".
+ "topic": "str", # A regex for matching the
+ topic(s) that this ACL should apply to. Required.
+ "id": "str" # Optional. An identifier for
+ the ACL. Will be computed after the ACL is created/updated.
+ }
+ ],
+ "mongo_user_settings": {
+ "databases": [
+ "str" # Optional. A list of databases to
+ which the user should have access. When the database is set to
+ ``admin``, the user will have access to all databases based on
+ the user's role i.e. a user with the role ``readOnly`` assigned
+ to the ``admin`` database will have read access to all databases.
+ ],
+ "role": "str" # Optional. The role to assign to the
+ user with each role mapping to a MongoDB built-in role. ``readOnly``
+ maps to a `read
+ <https://www.mongodb.com/docs/manual/reference/built-in-roles/#mongodb-authrole-read>`_
+ role. ``readWrite`` maps to a `readWrite
+ <https://www.mongodb.com/docs/manual/reference/built-in-roles/#mongodb-authrole-readWrite>`_
+ role. ``dbAdmin`` maps to a `dbAdmin
+ <https://www.mongodb.com/docs/manual/reference/built-in-roles/#mongodb-authrole-dbAdmin>`_
+ role. Known values are: "readOnly", "readWrite", and "dbAdmin".
+ },
+ "opensearch_acl": [
+ {
+ "index": "str", # Optional. A regex for
+ matching the indexes that this ACL should apply to.
+ "permission": "str" # Optional. Permission
+ set applied to the ACL. 'read' allows user to read from the
+ index. 'write' allows for user to write to the index. 'readwrite'
+ allows for both 'read' and 'write' permission. 'deny'(default)
+ restricts user from performing any operation over an index.
+ 'admin' allows for 'readwrite' as well as any operations to
+ administer the index. Known values are: "deny", "admin", "read",
+ "readwrite", and "write".
+ }
+ ],
+ "pg_allow_replication": bool # Optional. For Postgres
+ clusters, set to ``true`` for a user with replication rights. This option
+ is not currently supported for other database engines.
+ }
+ }
+ }
# response body for status code(s): 404
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
@@ -109278,28 +117171,36 @@ async def update_firewall_rules(
"""
@distributed_trace_async
- async def update_firewall_rules(
+ async def add_user(
self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any
- ) -> Optional[JSON]:
+ ) -> JSON:
# pylint: disable=line-too-long
- """Update Firewall Rules (Trusted Sources) for a Database.
+ """Add a Database User.
- To update a database cluster's firewall rules (known as "trusted sources" in the control
- panel), send a PUT request to ``/v2/databases/$DATABASE_ID/firewall`` specifying which
- resources should be able to open connections to the database. You may limit connections to
- specific Droplets, Kubernetes clusters, or IP addresses. When a tag is provided, any Droplet or
- Kubernetes node with that tag applied to it will have access. The firewall is limited to 100
- rules (or trusted sources). When possible, we recommend `placing your databases into a VPC
- network `_ to limit access to them
- instead of using a firewall.
- A successful.
+ To add a new database user, send a POST request to ``/v2/databases/$DATABASE_ID/users``
+ with the desired username.
+
+ Note: User management is not supported for Caching or Valkey clusters.
+
+ When adding a user to a MySQL cluster, additional options can be configured in the
+ ``mysql_settings`` object.
+
+ When adding a user to a Kafka cluster, additional options can be configured in
+ the ``settings`` object.
+
+ When adding a user to a MongoDB cluster, additional options can be configured in
+ the ``settings.mongo_user_settings`` object.
+
+ The response will be a JSON object with a key called ``user``. The value of this will be an
+ object that contains the standard attributes associated with a database user including
+ its randomly generated password.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
:param body: Is either a JSON type or a IO[bytes] type. Required.
:type body: JSON or IO[bytes]
- :return: JSON object or None
- :rtype: JSON or None
+ :return: JSON object
+ :rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
Example:
@@ -109307,27 +117208,158 @@ async def update_firewall_rules(
# JSON input template you can fill out and use as your body input.
body = {
- "rules": [
- {
- "type": "str", # The type of resource that the firewall rule
- allows to access the database cluster. Required. Known values are:
- "droplet", "k8s", "ip_addr", "tag", and "app".
- "value": "str", # The ID of the specific resource, the name
- of a tag applied to a group of resources, or the IP address that the
- firewall rule allows to access the database cluster. Required.
- "cluster_uuid": "str", # Optional. A unique ID for the
- database cluster to which the rule is applied.
- "created_at": "2020-02-20 00:00:00", # Optional. A time
- value given in ISO8601 combined date and time format that represents when
- the firewall rule was created.
- "description": "str", # Optional. A human-readable
- description of the rule.
- "uuid": "str" # Optional. A unique ID for the firewall rule
- itself.
- }
- ]
+ "name": "str", # The name of a database user. Required.
+ "access_cert": "str", # Optional. Access certificate for TLS client
+ authentication. (Kafka only).
+ "access_key": "str", # Optional. Access key for TLS client authentication.
+ (Kafka only).
+ "mysql_settings": {
+ "auth_plugin": "str" # A string specifying the authentication method
+ to be used for connections to the MySQL user account. The valid values are
+ ``mysql_native_password`` or ``caching_sha2_password``. If excluded when
+ creating a new user, the default for the version of MySQL in use will be
+ used. As of MySQL 8.0, the default is ``caching_sha2_password``. Required.
+ Known values are: "mysql_native_password" and "caching_sha2_password".
+ },
+ "password": "str", # Optional. A randomly generated password for the
+ database user.:code:`<br>`Requires ``database:view_credentials`` scope.
+ "readonly": bool, # Optional. (To be deprecated: use
+ settings.mongo_user_settings.role instead for access controls to MongoDB
+ databases). For MongoDB clusters, set to ``true`` to create a read-only user.
+ This option is not currently supported for other database engines.
+ "role": "str", # Optional. A string representing the database user's role.
+ The value will be either "primary" or "normal". Known values are: "primary" and
+ "normal".
+ "settings": {
+ "acl": [
+ {
+ "permission": "str", # Permission set applied to the
+ ACL. 'consume' allows for messages to be consumed from the topic.
+ 'produce' allows for messages to be published to the topic.
+ 'produceconsume' allows for both 'consume' and 'produce' permission.
+ 'admin' allows for 'produceconsume' as well as any operations to
+ administer the topic (delete, update). Required. Known values are:
+ "admin", "consume", "produce", and "produceconsume".
+ "topic": "str", # A regex for matching the topic(s)
+ that this ACL should apply to. Required.
+ "id": "str" # Optional. An identifier for the ACL.
+ Will be computed after the ACL is created/updated.
+ }
+ ],
+ "mongo_user_settings": {
+ "databases": [
+ "str" # Optional. A list of databases to which the
+ user should have access. When the database is set to ``admin``,
+ the user will have access to all databases based on the user's role
+ i.e. a user with the role ``readOnly`` assigned to the ``admin``
+ database will have read access to all databases.
+ ],
+ "role": "str" # Optional. The role to assign to the user
+ with each role mapping to a MongoDB built-in role. ``readOnly`` maps to
+ a `read
+ <https://www.mongodb.com/docs/manual/reference/built-in-roles/#mongodb-authrole-read>`_
+ role. ``readWrite`` maps to a `readWrite
+ <https://www.mongodb.com/docs/manual/reference/built-in-roles/#mongodb-authrole-readWrite>`_
+ role. ``dbAdmin`` maps to a `dbAdmin
+ <https://www.mongodb.com/docs/manual/reference/built-in-roles/#mongodb-authrole-dbAdmin>`_
+ role. Known values are: "readOnly", "readWrite", and "dbAdmin".
+ },
+ "opensearch_acl": [
+ {
+ "index": "str", # Optional. A regex for matching the
+ indexes that this ACL should apply to.
+ "permission": "str" # Optional. Permission set
+ applied to the ACL. 'read' allows user to read from the index.
+ 'write' allows for user to write to the index. 'readwrite' allows for
+ both 'read' and 'write' permission. 'deny'(default) restricts user
+ from performing any operation over an index. 'admin' allows for
+ 'readwrite' as well as any operations to administer the index. Known
+ values are: "deny", "admin", "read", "readwrite", and "write".
+ }
+ ],
+ "pg_allow_replication": bool # Optional. For Postgres clusters, set
+ to ``true`` for a user with replication rights. This option is not currently
+ supported for other database engines.
+ }
}
+ # response body for status code(s): 201
+ response == {
+ "user": {
+ "name": "str", # The name of a database user. Required.
+ "access_cert": "str", # Optional. Access certificate for TLS client
+ authentication. (Kafka only).
+ "access_key": "str", # Optional. Access key for TLS client
+ authentication. (Kafka only).
+ "mysql_settings": {
+ "auth_plugin": "str" # A string specifying the
+ authentication method to be used for connections to the MySQL user
+ account. The valid values are ``mysql_native_password`` or
+ ``caching_sha2_password``. If excluded when creating a new user, the
+ default for the version of MySQL in use will be used. As of MySQL 8.0,
+ the default is ``caching_sha2_password``. Required. Known values are:
+ "mysql_native_password" and "caching_sha2_password".
+ },
+ "password": "str", # Optional. A randomly generated password for the
+ database user.:code:`<br>`Requires ``database:view_credentials`` scope.
+ "role": "str", # Optional. A string representing the database user's
+ role. The value will be either "primary" or "normal". Known values are:
+ "primary" and "normal".
+ "settings": {
+ "acl": [
+ {
+ "permission": "str", # Permission set
+ applied to the ACL. 'consume' allows for messages to be consumed
+ from the topic. 'produce' allows for messages to be published to
+ the topic. 'produceconsume' allows for both 'consume' and
+ 'produce' permission. 'admin' allows for 'produceconsume' as well
+ as any operations to administer the topic (delete, update).
+ Required. Known values are: "admin", "consume", "produce", and
+ "produceconsume".
+ "topic": "str", # A regex for matching the
+ topic(s) that this ACL should apply to. Required.
+ "id": "str" # Optional. An identifier for
+ the ACL. Will be computed after the ACL is created/updated.
+ }
+ ],
+ "mongo_user_settings": {
+ "databases": [
+ "str" # Optional. A list of databases to
+ which the user should have access. When the database is set to
+ ``admin``, the user will have access to all databases based on
+ the user's role i.e. a user with the role ``readOnly`` assigned
+ to the ``admin`` database will have read access to all databases.
+ ],
+ "role": "str" # Optional. The role to assign to the
+ user with each role mapping to a MongoDB built-in role. ``readOnly``
+ maps to a `read
+ <https://www.mongodb.com/docs/manual/reference/built-in-roles/#mongodb-authrole-read>`_
+ role. ``readWrite`` maps to a `readWrite
+ <https://www.mongodb.com/docs/manual/reference/built-in-roles/#mongodb-authrole-readWrite>`_
+ role. ``dbAdmin`` maps to a `dbAdmin
+ <https://www.mongodb.com/docs/manual/reference/built-in-roles/#mongodb-authrole-dbAdmin>`_
+ role. Known values are: "readOnly", "readWrite", and "dbAdmin".
+ },
+ "opensearch_acl": [
+ {
+ "index": "str", # Optional. A regex for
+ matching the indexes that this ACL should apply to.
+ "permission": "str" # Optional. Permission
+ set applied to the ACL. 'read' allows user to read from the
+ index. 'write' allows for user to write to the index. 'readwrite'
+ allows for both 'read' and 'write' permission. 'deny'(default)
+ restricts user from performing any operation over an index.
+ 'admin' allows for 'readwrite' as well as any operations to
+ administer the index. Known values are: "deny", "admin", "read",
+ "readwrite", and "write".
+ }
+ ],
+ "pg_allow_replication": bool # Optional. For Postgres
+ clusters, set to ``true`` for a user with replication rights. This option
+ is not currently supported for other database engines.
+ }
+ }
+ }
# response body for status code(s): 404
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
@@ -109359,7 +117391,7 @@ async def update_firewall_rules(
content_type: Optional[str] = kwargs.pop(
"content_type", _headers.pop("Content-Type", None)
)
- cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None)
+ cls: ClsType[JSON] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -109369,7 +117401,7 @@ async def update_firewall_rules(
else:
_json = body
- _request = build_databases_update_firewall_rules_request(
+ _request = build_databases_add_user_request(
database_cluster_uuid=database_cluster_uuid,
content_type=content_type,
json=_json,
@@ -109388,15 +117420,14 @@ async def update_firewall_rules(
response = pipeline_response.http_response
- if response.status_code not in [204, 404]:
+ if response.status_code not in [201, 404]:
if _stream:
await response.read() # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
raise HttpResponseError(response=response)
- deserialized = None
response_headers = {}
- if response.status_code == 204:
+ if response.status_code == 201:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
)
@@ -109407,6 +117438,11 @@ async def update_firewall_rules(
"int", response.headers.get("ratelimit-reset")
)
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
if response.status_code == 404:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
@@ -109424,146 +117460,122 @@ async def update_firewall_rules(
deserialized = None
if cls:
- return cls(pipeline_response, deserialized, response_headers) # type: ignore
+ return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
- return deserialized # type: ignore
+ return cast(JSON, deserialized) # type: ignore
- @overload
- async def update_maintenance_window(
- self,
- database_cluster_uuid: str,
- body: JSON,
- *,
- content_type: str = "application/json",
- **kwargs: Any
- ) -> Optional[JSON]:
+ @distributed_trace_async
+ async def get_user(
+ self, database_cluster_uuid: str, username: str, **kwargs: Any
+ ) -> JSON:
# pylint: disable=line-too-long
- """Configure a Database Cluster's Maintenance Window.
-
- To configure the window when automatic maintenance should be performed for a database cluster,
- send a PUT request to ``/v2/databases/$DATABASE_ID/maintenance``.
- A successful request will receive a 204 No Content status code with no body in response.
+ """Retrieve an Existing Database User.
- :param database_cluster_uuid: A unique identifier for a database cluster. Required.
- :type database_cluster_uuid: str
- :param body: Required.
- :type body: JSON
- :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
- Default value is "application/json".
- :paramtype content_type: str
- :return: JSON object or None
- :rtype: JSON or None
- :raises ~azure.core.exceptions.HttpResponseError:
+ To show information about an existing database user, send a GET request to
+ ``/v2/databases/$DATABASE_ID/users/$USERNAME``.
- Example:
- .. code-block:: python
+ Note: User management is not supported for Caching or Valkey clusters.
- # JSON input template you can fill out and use as your body input.
- body = {
- "day": "str", # The day of the week on which to apply maintenance updates.
- Required.
- "hour": "str", # The hour in UTC at which maintenance updates will be
- applied in 24 hour format. Required.
- "description": [
- "str" # Optional. A list of strings, each containing information
- about a pending maintenance update.
- ],
- "pending": bool # Optional. A boolean value indicating whether any
- maintenance is scheduled to be performed in the next window.
- }
+ The response will be a JSON object with a ``user`` key. This will be set to an object
+ containing the standard database user attributes. The user's password will not show
+ up unless the ``database:view_credentials`` scope is present.
- # response body for status code(s): 404
- response == {
- "id": "str", # A short identifier corresponding to the HTTP status code
- returned. For example, the ID for a response returning a 404 status code would
- be "not_found.". Required.
- "message": "str", # A message providing additional information about the
- error, including details to help resolve it when possible. Required.
- "request_id": "str" # Optional. Optionally, some endpoints may include a
- request ID that should be provided when reporting bugs or opening support
- tickets to help identify the issue.
- }
- """
+ For MySQL clusters, additional options will be contained in the ``mysql_settings``
+ object.
- @overload
- async def update_maintenance_window(
- self,
- database_cluster_uuid: str,
- body: IO[bytes],
- *,
- content_type: str = "application/json",
- **kwargs: Any
- ) -> Optional[JSON]:
- # pylint: disable=line-too-long
- """Configure a Database Cluster's Maintenance Window.
+ For Kafka clusters, additional options will be contained in the ``settings`` object.
- To configure the window when automatic maintenance should be performed for a database cluster,
- send a PUT request to ``/v2/databases/$DATABASE_ID/maintenance``.
- A successful request will receive a 204 No Content status code with no body in response.
+ For MongoDB clusters, additional information will be contained in the mongo_user_settings
+ object.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
- :param body: Required.
- :type body: IO[bytes]
- :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
- Default value is "application/json".
- :paramtype content_type: str
- :return: JSON object or None
- :rtype: JSON or None
+ :param username: The name of the database user. Required.
+ :type username: str
+ :return: JSON object
+ :rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
- # response body for status code(s): 404
+ # response body for status code(s): 200
response == {
- "id": "str", # A short identifier corresponding to the HTTP status code
- returned. For example, the ID for a response returning a 404 status code would
- be "not_found.". Required.
- "message": "str", # A message providing additional information about the
- error, including details to help resolve it when possible. Required.
- "request_id": "str" # Optional. Optionally, some endpoints may include a
- request ID that should be provided when reporting bugs or opening support
- tickets to help identify the issue.
- }
- """
-
- @distributed_trace_async
- async def update_maintenance_window(
- self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any
- ) -> Optional[JSON]:
- # pylint: disable=line-too-long
- """Configure a Database Cluster's Maintenance Window.
-
- To configure the window when automatic maintenance should be performed for a database cluster,
- send a PUT request to ``/v2/databases/$DATABASE_ID/maintenance``.
- A successful request will receive a 204 No Content status code with no body in response.
-
- :param database_cluster_uuid: A unique identifier for a database cluster. Required.
- :type database_cluster_uuid: str
- :param body: Is either a JSON type or a IO[bytes] type. Required.
- :type body: JSON or IO[bytes]
- :return: JSON object or None
- :rtype: JSON or None
- :raises ~azure.core.exceptions.HttpResponseError:
-
- Example:
- .. code-block:: python
-
- # JSON input template you can fill out and use as your body input.
- body = {
- "day": "str", # The day of the week on which to apply maintenance updates.
- Required.
- "hour": "str", # The hour in UTC at which maintenance updates will be
- applied in 24 hour format. Required.
- "description": [
- "str" # Optional. A list of strings, each containing information
- about a pending maintenance update.
- ],
- "pending": bool # Optional. A boolean value indicating whether any
- maintenance is scheduled to be performed in the next window.
+ "user": {
+ "name": "str", # The name of a database user. Required.
+ "access_cert": "str", # Optional. Access certificate for TLS client
+ authentication. (Kafka only).
+ "access_key": "str", # Optional. Access key for TLS client
+ authentication. (Kafka only).
+ "mysql_settings": {
+ "auth_plugin": "str" # A string specifying the
+ authentication method to be used for connections to the MySQL user
+ account. The valid values are ``mysql_native_password`` or
+ ``caching_sha2_password``. If excluded when creating a new user, the
+ default for the version of MySQL in use will be used. As of MySQL 8.0,
+ the default is ``caching_sha2_password``. Required. Known values are:
+ "mysql_native_password" and "caching_sha2_password".
+ },
+ "password": "str", # Optional. A randomly generated password for the
+ database user.:code:`<br>`Requires ``database:view_credentials`` scope.
+ "role": "str", # Optional. A string representing the database user's
+ role. The value will be either "primary" or "normal". Known values are:
+ "primary" and "normal".
+ "settings": {
+ "acl": [
+ {
+ "permission": "str", # Permission set
+ applied to the ACL. 'consume' allows for messages to be consumed
+ from the topic. 'produce' allows for messages to be published to
+ the topic. 'produceconsume' allows for both 'consume' and
+ 'produce' permission. 'admin' allows for 'produceconsume' as well
+ as any operations to administer the topic (delete, update).
+ Required. Known values are: "admin", "consume", "produce", and
+ "produceconsume".
+ "topic": "str", # A regex for matching the
+ topic(s) that this ACL should apply to. Required.
+ "id": "str" # Optional. An identifier for
+ the ACL. Will be computed after the ACL is created/updated.
+ }
+ ],
+ "mongo_user_settings": {
+ "databases": [
+ "str" # Optional. A list of databases to
+ which the user should have access. When the database is set to
+ ``admin``, the user will have access to all databases based on
+ the user's role i.e. a user with the role ``readOnly`` assigned
+ to the ``admin`` database will have read access to all databases.
+ ],
+ "role": "str" # Optional. The role to assign to the
+ user with each role mapping to a MongoDB built-in role. ``readOnly``
+ maps to a `read
+ <https://www.mongodb.com/docs/manual/reference/built-in-roles/#mongodb-authrole-read>`_
+ role. ``readWrite`` maps to a `readWrite
+ <https://www.mongodb.com/docs/manual/reference/built-in-roles/#mongodb-authrole-readWrite>`_
+ role. ``dbAdmin`` maps to a `dbAdmin
+ <https://www.mongodb.com/docs/manual/reference/built-in-roles/#mongodb-authrole-dbAdmin>`_
+ role. Known values are: "readOnly", "readWrite", and "dbAdmin".
+ },
+ "opensearch_acl": [
+ {
+ "index": "str", # Optional. A regex for
+ matching the indexes that this ACL should apply to.
+ "permission": "str" # Optional. Permission
+ set applied to the ACL. 'read' allows user to read from the
+ index. 'write' allows for user to write to the index. 'readwrite'
+ allows for both 'read' and 'write' permission. 'deny'(default)
+ restricts user from performing any operation over an index.
+ 'admin' allows for 'readwrite' as well as any operations to
+ administer the index. Known values are: "deny", "admin", "read",
+ "readwrite", and "write".
+ }
+ ],
+ "pg_allow_replication": bool # Optional. For Postgres
+ clusters, set to ``true`` for a user with replication rights. This option
+ is not currently supported for other database engines.
+ }
+ }
}
-
# response body for status code(s): 404
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
@@ -109589,27 +117601,14 @@ async def update_maintenance_window(
}
error_map.update(kwargs.pop("error_map", {}) or {})
- _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _headers = kwargs.pop("headers", {}) or {}
_params = kwargs.pop("params", {}) or {}
- content_type: Optional[str] = kwargs.pop(
- "content_type", _headers.pop("Content-Type", None)
- )
- cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None)
-
- content_type = content_type or "application/json"
- _json = None
- _content = None
- if isinstance(body, (IOBase, bytes)):
- _content = body
- else:
- _json = body
+ cls: ClsType[JSON] = kwargs.pop("cls", None)
- _request = build_databases_update_maintenance_window_request(
+ _request = build_databases_get_user_request(
database_cluster_uuid=database_cluster_uuid,
- content_type=content_type,
- json=_json,
- content=_content,
+ username=username,
headers=_headers,
params=_params,
)
@@ -109624,15 +117623,14 @@ async def update_maintenance_window(
response = pipeline_response.http_response
- if response.status_code not in [204, 404]:
+ if response.status_code not in [200, 404]:
if _stream:
await response.read() # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
raise HttpResponseError(response=response)
- deserialized = None
response_headers = {}
- if response.status_code == 204:
+ if response.status_code == 200:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
)
@@ -109643,6 +117641,11 @@ async def update_maintenance_window(
"int", response.headers.get("ratelimit-reset")
)
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
if response.status_code == 404:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
@@ -109660,23 +117663,29 @@ async def update_maintenance_window(
deserialized = None
if cls:
- return cls(pipeline_response, deserialized, response_headers) # type: ignore
+ return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
- return deserialized # type: ignore
+ return cast(JSON, deserialized) # type: ignore
@distributed_trace_async
- async def install_update(
- self, database_cluster_uuid: str, **kwargs: Any
+ async def delete_user(
+ self, database_cluster_uuid: str, username: str, **kwargs: Any
) -> Optional[JSON]:
# pylint: disable=line-too-long
- """Start Database Maintenance.
+ """Remove a Database User.
- To start the installation of updates for a database cluster, send a PUT request to
- ``/v2/databases/$DATABASE_ID/install_update``.
- A successful request will receive a 204 No Content status code with no body in response.
+ To remove a specific database user, send a DELETE request to
+ ``/v2/databases/$DATABASE_ID/users/$USERNAME``.
+
+ A status of 204 will be given. This indicates that the request was processed
+ successfully, but that no response body is needed.
+
+ Note: User management is not supported for Caching or Valkey clusters.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
+ :param username: The name of the database user. Required.
+ :type username: str
:return: JSON object or None
:rtype: JSON or None
:raises ~azure.core.exceptions.HttpResponseError:
@@ -109714,8 +117723,9 @@ async def install_update(
cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None)
- _request = build_databases_install_update_request(
+ _request = build_databases_delete_user_request(
database_cluster_uuid=database_cluster_uuid,
+ username=username,
headers=_headers,
params=_params,
)
@@ -109770,20 +117780,41 @@ async def install_update(
return deserialized # type: ignore
- @distributed_trace_async
- async def list_backups(self, database_cluster_uuid: str, **kwargs: Any) -> JSON:
+ @overload
+ async def update_user(
+ self,
+ database_cluster_uuid: str,
+ username: str,
+ body: JSON,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> JSON:
# pylint: disable=line-too-long
- """List Backups for a Database Cluster.
+ """Update a Database User.
- To list all of the available backups of a PostgreSQL or MySQL database cluster, send a GET
- request to ``/v2/databases/$DATABASE_ID/backups``.
- **Note**\\ : Backups are not supported for Caching or Valkey clusters.
- The result will be a JSON object with a ``backups key``. This will be set to an array of backup
- objects, each of which will contain the size of the backup and the timestamp at which it was
- created.
+ To update an existing database user, send a PUT request to
+ ``/v2/databases/$DATABASE_ID/users/$USERNAME``
+ with the desired settings.
+
+ **Note**\\ : only ``settings`` can be updated via this type of request. If you wish to change
+ the name of a user,
+ you must recreate a new user.
+
+ The response will be a JSON object with a key called ``user``. The value of this will be an
+ object that contains the name of the update database user, along with the ``settings`` object
+ that
+ has been updated.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
+ :param username: The name of the database user. Required.
+ :type username: str
+ :param body: Required.
+ :type body: JSON
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
:return: JSON object
:rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
@@ -109791,29 +117822,136 @@ async def list_backups(self, database_cluster_uuid: str, **kwargs: Any) -> JSON:
Example:
.. code-block:: python
- # response body for status code(s): 200
+ # JSON input template you can fill out and use as your body input.
+ body = {
+ "settings": {
+ "acl": [
+ {
+ "permission": "str", # Permission set applied to the
+ ACL. 'consume' allows for messages to be consumed from the topic.
+ 'produce' allows for messages to be published to the topic.
+ 'produceconsume' allows for both 'consume' and 'produce' permission.
+ 'admin' allows for 'produceconsume' as well as any operations to
+ administer the topic (delete, update). Required. Known values are:
+ "admin", "consume", "produce", and "produceconsume".
+ "topic": "str", # A regex for matching the topic(s)
+ that this ACL should apply to. Required.
+ "id": "str" # Optional. An identifier for the ACL.
+ Will be computed after the ACL is created/updated.
+ }
+ ],
+ "mongo_user_settings": {
+ "databases": [
+ "str" # Optional. A list of databases to which the
+ user should have access. When the database is set to ``admin``"" ,
+ the user will have access to all databases based on the user's role
+ i.e. a user with the role ``readOnly`` assigned to the ``admin``
+ database will have read access to all databases.
+ ],
+ "role": "str" # Optional. The role to assign to the user
+ with each role mapping to a MongoDB built-in role. ``readOnly`` maps to
+ a `read
+ `_
+ role. ``readWrite`` maps to a `readWrite
+ `_
+ role. ``dbAdmin`` maps to a `dbAdmin
+ `_
+ role. Known values are: "readOnly", "readWrite", and "dbAdmin".
+ },
+ "opensearch_acl": [
+ {
+ "index": "str", # Optional. A regex for matching the
+ indexes that this ACL should apply to.
+ "permission": "str" # Optional. Permission set
+ applied to the ACL. 'read' allows user to read from the index.
+ 'write' allows for user to write to the index. 'readwrite' allows for
+ both 'read' and 'write' permission. 'deny'(default) restricts user
+ from performing any operation over an index. 'admin' allows for
+ 'readwrite' as well as any operations to administer the index. Known
+ values are: "deny", "admin", "read", "readwrite", and "write".
+ }
+ ],
+ "pg_allow_replication": bool # Optional. For Postgres clusters, set
+ to ``true`` for a user with replication rights. This option is not currently
+ supported for other database engines.
+ }
+ }
+
+ # response body for status code(s): 201
response == {
- "backups": [
- {
- "created_at": "2020-02-20 00:00:00", # A time value given in
- ISO8601 combined date and time format at which the backup was created.
- Required.
- "size_gigabytes": 0.0, # The size of the database backup in
- GBs. Required.
- "incremental": bool # Optional. Indicates if this backup is
- a full or an incremental one (available only for MySQL).
+ "user": {
+ "name": "str", # The name of a database user. Required.
+ "access_cert": "str", # Optional. Access certificate for TLS client
+ authentication. (Kafka only).
+ "access_key": "str", # Optional. Access key for TLS client
+ authentication. (Kafka only).
+ "mysql_settings": {
+ "auth_plugin": "str" # A string specifying the
+ authentication method to be used for connections to the MySQL user
+ account. The valid values are ``mysql_native_password`` or
+ ``caching_sha2_password``. If excluded when creating a new user, the
+ default for the version of MySQL in use will be used. As of MySQL 8.0,
+ the default is ``caching_sha2_password``. Required. Known values are:
+ "mysql_native_password" and "caching_sha2_password".
+ },
+ "password": "str", # Optional. A randomly generated password for the
+ database user.:code:`
`Requires ``database:view_credentials`` scope.
+ "role": "str", # Optional. A string representing the database user's
+ role. The value will be either "primary" or "normal". Known values are:
+ "primary" and "normal".
+ "settings": {
+ "acl": [
+ {
+ "permission": "str", # Permission set
+ applied to the ACL. 'consume' allows for messages to be consumed
+ from the topic. 'produce' allows for messages to be published to
+ the topic. 'produceconsume' allows for both 'consume' and
+ 'produce' permission. 'admin' allows for 'produceconsume' as well
+ as any operations to administer the topic (delete, update).
+ Required. Known values are: "admin", "consume", "produce", and
+ "produceconsume".
+ "topic": "str", # A regex for matching the
+ topic(s) that this ACL should apply to. Required.
+ "id": "str" # Optional. An identifier for
+ the ACL. Will be computed after the ACL is created/updated.
+ }
+ ],
+ "mongo_user_settings": {
+ "databases": [
+ "str" # Optional. A list of databases to
+ which the user should have access. When the database is set to
+ ``admin``"" , the user will have access to all databases based on
+ the user's role i.e. a user with the role ``readOnly`` assigned
+ to the ``admin`` database will have read access to all databases.
+ ],
+ "role": "str" # Optional. The role to assign to the
+ user with each role mapping to a MongoDB built-in role. ``readOnly``
+ maps to a `read
+ `_
+ role. ``readWrite`` maps to a `readWrite
+ `_
+ role. ``dbAdmin`` maps to a `dbAdmin
+ `_
+ role. Known values are: "readOnly", "readWrite", and "dbAdmin".
+ },
+ "opensearch_acl": [
+ {
+ "index": "str", # Optional. A regex for
+ matching the indexes that this ACL should apply to.
+ "permission": "str" # Optional. Permission
+ set applied to the ACL. 'read' allows user to read from the
+ index. 'write' allows for user to write to the index. 'readwrite'
+ allows for both 'read' and 'write' permission. 'deny'(default)
+ restricts user from performing any operation over an index.
+ 'admin' allows for 'readwrite' as well as any operations to
+ administer the index. Known values are: "deny", "admin", "read",
+ "readwrite", and "write".
+ }
+ ],
+ "pg_allow_replication": bool # Optional. For Postgres
+ clusters, set to ``true`` for a user with replication rights. This option
+ is not currently supported for other database engines.
}
- ],
- "backup_progress": "str", # Optional. If a backup is currently in progress,
- this attribute shows the percentage of completion. If no backup is in progress,
- this attribute will be hidden.
- "scheduled_backup_time": {
- "backup_hour": 0, # Optional. The hour of the day when the backup is
- scheduled (in UTC).
- "backup_interval_hours": 0, # Optional. The frequency, in hours, at
- which backups are taken.
- "backup_minute": 0 # Optional. The minute of the hour when the
- backup is scheduled.
}
}
# response body for status code(s): 404
@@ -109828,99 +117966,169 @@ async def list_backups(self, database_cluster_uuid: str, **kwargs: Any) -> JSON:
tickets to help identify the issue.
}
"""
- error_map: MutableMapping[int, Type[HttpResponseError]] = {
- 404: ResourceNotFoundError,
- 409: ResourceExistsError,
- 304: ResourceNotModifiedError,
- 401: cast(
- Type[HttpResponseError],
- lambda response: ClientAuthenticationError(response=response),
- ),
- 429: HttpResponseError,
- 500: HttpResponseError,
- }
- error_map.update(kwargs.pop("error_map", {}) or {})
-
- _headers = kwargs.pop("headers", {}) or {}
- _params = kwargs.pop("params", {}) or {}
-
- cls: ClsType[JSON] = kwargs.pop("cls", None)
-
- _request = build_databases_list_backups_request(
- database_cluster_uuid=database_cluster_uuid,
- headers=_headers,
- params=_params,
- )
- _request.url = self._client.format_url(_request.url)
-
- _stream = False
- pipeline_response: PipelineResponse = (
- await self._client._pipeline.run( # pylint: disable=protected-access
- _request, stream=_stream, **kwargs
- )
- )
-
- response = pipeline_response.http_response
- if response.status_code not in [200, 404]:
- if _stream:
- await response.read() # Load the body in memory and close the socket
- map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
- raise HttpResponseError(response=response)
+ @overload
+ async def update_user(
+ self,
+ database_cluster_uuid: str,
+ username: str,
+ body: IO[bytes],
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> JSON:
+ # pylint: disable=line-too-long
+ """Update a Database User.
- response_headers = {}
- if response.status_code == 200:
- response_headers["ratelimit-limit"] = self._deserialize(
- "int", response.headers.get("ratelimit-limit")
- )
- response_headers["ratelimit-remaining"] = self._deserialize(
- "int", response.headers.get("ratelimit-remaining")
- )
- response_headers["ratelimit-reset"] = self._deserialize(
- "int", response.headers.get("ratelimit-reset")
- )
+ To update an existing database user, send a PUT request to
+ ``/v2/databases/$DATABASE_ID/users/$USERNAME``
+ with the desired settings.
- if response.content:
- deserialized = response.json()
- else:
- deserialized = None
+ **Note**\\ : only ``settings`` can be updated via this type of request. If you wish to change
+ the name of a user,
+ you must recreate a new user.
- if response.status_code == 404:
- response_headers["ratelimit-limit"] = self._deserialize(
- "int", response.headers.get("ratelimit-limit")
- )
- response_headers["ratelimit-remaining"] = self._deserialize(
- "int", response.headers.get("ratelimit-remaining")
- )
- response_headers["ratelimit-reset"] = self._deserialize(
- "int", response.headers.get("ratelimit-reset")
- )
+ The response will be a JSON object with a key called ``user``. The value of this will be an
+ object that contains the name of the update database user, along with the ``settings`` object
+ that
+ has been updated.
- if response.content:
- deserialized = response.json()
- else:
- deserialized = None
+ :param database_cluster_uuid: A unique identifier for a database cluster. Required.
+ :type database_cluster_uuid: str
+ :param username: The name of the database user. Required.
+ :type username: str
+ :param body: Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: JSON object
+ :rtype: JSON
+ :raises ~azure.core.exceptions.HttpResponseError:
- if cls:
- return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
+ Example:
+ .. code-block:: python
- return cast(JSON, deserialized) # type: ignore
+ # response body for status code(s): 201
+ response == {
+ "user": {
+ "name": "str", # The name of a database user. Required.
+ "access_cert": "str", # Optional. Access certificate for TLS client
+ authentication. (Kafka only).
+ "access_key": "str", # Optional. Access key for TLS client
+ authentication. (Kafka only).
+ "mysql_settings": {
+ "auth_plugin": "str" # A string specifying the
+ authentication method to be used for connections to the MySQL user
+ account. The valid values are ``mysql_native_password`` or
+ ``caching_sha2_password``. If excluded when creating a new user, the
+ default for the version of MySQL in use will be used. As of MySQL 8.0,
+ the default is ``caching_sha2_password``. Required. Known values are:
+ "mysql_native_password" and "caching_sha2_password".
+ },
+ "password": "str", # Optional. A randomly generated password for the
+ database user.:code:`
`Requires ``database:view_credentials`` scope.
+ "role": "str", # Optional. A string representing the database user's
+ role. The value will be either "primary" or "normal". Known values are:
+ "primary" and "normal".
+ "settings": {
+ "acl": [
+ {
+ "permission": "str", # Permission set
+ applied to the ACL. 'consume' allows for messages to be consumed
+ from the topic. 'produce' allows for messages to be published to
+ the topic. 'produceconsume' allows for both 'consume' and
+ 'produce' permission. 'admin' allows for 'produceconsume' as well
+ as any operations to administer the topic (delete, update).
+ Required. Known values are: "admin", "consume", "produce", and
+ "produceconsume".
+ "topic": "str", # A regex for matching the
+ topic(s) that this ACL should apply to. Required.
+ "id": "str" # Optional. An identifier for
+ the ACL. Will be computed after the ACL is created/updated.
+ }
+ ],
+ "mongo_user_settings": {
+ "databases": [
+ "str" # Optional. A list of databases to
+ which the user should have access. When the database is set to
+ ``admin``"" , the user will have access to all databases based on
+ the user's role i.e. a user with the role ``readOnly`` assigned
+ to the ``admin`` database will have read access to all databases.
+ ],
+ "role": "str" # Optional. The role to assign to the
+ user with each role mapping to a MongoDB built-in role. ``readOnly``
+ maps to a `read
+ `_
+ role. ``readWrite`` maps to a `readWrite
+ `_
+ role. ``dbAdmin`` maps to a `dbAdmin
+ `_
+ role. Known values are: "readOnly", "readWrite", and "dbAdmin".
+ },
+ "opensearch_acl": [
+ {
+ "index": "str", # Optional. A regex for
+ matching the indexes that this ACL should apply to.
+ "permission": "str" # Optional. Permission
+ set applied to the ACL. 'read' allows user to read from the
+ index. 'write' allows for user to write to the index. 'readwrite'
+ allows for both 'read' and 'write' permission. 'deny'(default)
+ restricts user from performing any operation over an index.
+ 'admin' allows for 'readwrite' as well as any operations to
+ administer the index. Known values are: "deny", "admin", "read",
+ "readwrite", and "write".
+ }
+ ],
+ "pg_allow_replication": bool # Optional. For Postgres
+ clusters, set to ``true`` for a user with replication rights. This option
+ is not currently supported for other database engines.
+ }
+ }
+ }
+ # response body for status code(s): 404
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
+ """
@distributed_trace_async
- async def list_replicas(self, database_cluster_uuid: str, **kwargs: Any) -> JSON:
+ async def update_user(
+ self,
+ database_cluster_uuid: str,
+ username: str,
+ body: Union[JSON, IO[bytes]],
+ **kwargs: Any
+ ) -> JSON:
# pylint: disable=line-too-long
- """List All Read-only Replicas.
+ """Update a Database User.
- To list all of the read-only replicas associated with a database cluster, send a GET request to
- ``/v2/databases/$DATABASE_ID/replicas``.
+ To update an existing database user, send a PUT request to
+ ``/v2/databases/$DATABASE_ID/users/$USERNAME``
+ with the desired settings.
- **Note**\\ : Read-only replicas are not supported for Caching or Valkey clusters.
+ **Note**\\ : only ``settings`` can be updated via this type of request. If you wish to change
+ the name of a user,
+ you must recreate a new user.
- The result will be a JSON object with a ``replicas`` key. This will be set to an array of
- database replica objects, each of which will contain the standard database replica attributes.
+ The response will be a JSON object with a key called ``user``. The value of this will be an
+ object that contains the name of the update database user, along with the ``settings`` object
+ that
+ has been updated.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
+ :param username: The name of the database user. Required.
+ :type username: str
+ :param body: Is either a JSON type or a IO[bytes] type. Required.
+ :type body: JSON or IO[bytes]
:return: JSON object
:rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
@@ -109928,93 +118136,137 @@ async def list_replicas(self, database_cluster_uuid: str, **kwargs: Any) -> JSON
Example:
.. code-block:: python
- # response body for status code(s): 200
+ # JSON input template you can fill out and use as your body input.
+ body = {
+ "settings": {
+ "acl": [
+ {
+ "permission": "str", # Permission set applied to the
+ ACL. 'consume' allows for messages to be consumed from the topic.
+ 'produce' allows for messages to be published to the topic.
+ 'produceconsume' allows for both 'consume' and 'produce' permission.
+ 'admin' allows for 'produceconsume' as well as any operations to
+ administer the topic (delete, update). Required. Known values are:
+ "admin", "consume", "produce", and "produceconsume".
+ "topic": "str", # A regex for matching the topic(s)
+ that this ACL should apply to. Required.
+ "id": "str" # Optional. An identifier for the ACL.
+ Will be computed after the ACL is created/updated.
+ }
+ ],
+ "mongo_user_settings": {
+ "databases": [
+ "str" # Optional. A list of databases to which the
+ user should have access. When the database is set to ``admin``"" ,
+ the user will have access to all databases based on the user's role
+ i.e. a user with the role ``readOnly`` assigned to the ``admin``
+ database will have read access to all databases.
+ ],
+ "role": "str" # Optional. The role to assign to the user
+ with each role mapping to a MongoDB built-in role. ``readOnly`` maps to
+ a `read
+ `_
+ role. ``readWrite`` maps to a `readWrite
+ `_
+ role. ``dbAdmin`` maps to a `dbAdmin
+ `_
+ role. Known values are: "readOnly", "readWrite", and "dbAdmin".
+ },
+ "opensearch_acl": [
+ {
+ "index": "str", # Optional. A regex for matching the
+ indexes that this ACL should apply to.
+ "permission": "str" # Optional. Permission set
+ applied to the ACL. 'read' allows user to read from the index.
+ 'write' allows for user to write to the index. 'readwrite' allows for
+ both 'read' and 'write' permission. 'deny'(default) restricts user
+ from performing any operation over an index. 'admin' allows for
+ 'readwrite' as well as any operations to administer the index. Known
+ values are: "deny", "admin", "read", "readwrite", and "write".
+ }
+ ],
+ "pg_allow_replication": bool # Optional. For Postgres clusters, set
+ to ``true`` for a user with replication rights. This option is not currently
+ supported for other database engines.
+ }
+ }
+
+ # response body for status code(s): 201
response == {
- "replicas": [
- {
- "name": "str", # The name to give the read-only replicating.
- Required.
- "connection": {
- "database": "str", # Optional. The name of the
- default database.
- "host": "str", # Optional. The FQDN pointing to the
- database cluster's current primary node.
- "password": "str", # Optional. The randomly
- generated password for the default
- user.:code:`
`:code:`
`Requires ``database:view_credentials``
- scope.
- "port": 0, # Optional. The port on which the
- database cluster is listening.
- "ssl": bool, # Optional. A boolean value indicating
- if the connection should be made over SSL.
- "uri": "str", # Optional. A connection string in the
- format accepted by the ``psql`` command. This is provided as a
- convenience and should be able to be constructed by the other
- attributes.
- "user": "str" # Optional. The default user for the
- database.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- },
- "created_at": "2020-02-20 00:00:00", # Optional. A time
- value given in ISO8601 combined date and time format that represents when
- the database cluster was created.
- "do_settings": {
- "service_cnames": [
- "str" # Optional. An array of custom CNAMEs
- for the database cluster. Each CNAME must be a valid RFC 1123
- hostname (e.g., "db.example.com"). Maximum of 16 CNAMEs allowed,
- each up to 253 characters.
- ]
- },
- "id": "str", # Optional. A unique ID that can be used to
- identify and reference a database replica.
- "private_connection": {
- "database": "str", # Optional. The name of the
- default database.
- "host": "str", # Optional. The FQDN pointing to the
- database cluster's current primary node.
- "password": "str", # Optional. The randomly
- generated password for the default
- user.:code:`
`:code:`
`Requires ``database:view_credentials``
- scope.
- "port": 0, # Optional. The port on which the
- database cluster is listening.
- "ssl": bool, # Optional. A boolean value indicating
- if the connection should be made over SSL.
- "uri": "str", # Optional. A connection string in the
- format accepted by the ``psql`` command. This is provided as a
- convenience and should be able to be constructed by the other
- attributes.
- "user": "str" # Optional. The default user for the
- database.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
+ "user": {
+ "name": "str", # The name of a database user. Required.
+ "access_cert": "str", # Optional. Access certificate for TLS client
+ authentication. (Kafka only).
+ "access_key": "str", # Optional. Access key for TLS client
+ authentication. (Kafka only).
+ "mysql_settings": {
+ "auth_plugin": "str" # A string specifying the
+ authentication method to be used for connections to the MySQL user
+ account. The valid values are ``mysql_native_password`` or
+ ``caching_sha2_password``. If excluded when creating a new user, the
+ default for the version of MySQL in use will be used. As of MySQL 8.0,
+ the default is ``caching_sha2_password``. Required. Known values are:
+ "mysql_native_password" and "caching_sha2_password".
+ },
+ "password": "str", # Optional. A randomly generated password for the
+ database user.:code:`
`Requires ``database:view_credentials`` scope.
+ "role": "str", # Optional. A string representing the database user's
+ role. The value will be either "primary" or "normal". Known values are:
+ "primary" and "normal".
+ "settings": {
+ "acl": [
+ {
+ "permission": "str", # Permission set
+ applied to the ACL. 'consume' allows for messages to be consumed
+ from the topic. 'produce' allows for messages to be published to
+ the topic. 'produceconsume' allows for both 'consume' and
+ 'produce' permission. 'admin' allows for 'produceconsume' as well
+ as any operations to administer the topic (delete, update).
+ Required. Known values are: "admin", "consume", "produce", and
+ "produceconsume".
+ "topic": "str", # A regex for matching the
+ topic(s) that this ACL should apply to. Required.
+ "id": "str" # Optional. An identifier for
+ the ACL. Will be computed after the ACL is created/updated.
+ }
+ ],
+ "mongo_user_settings": {
+ "databases": [
+ "str" # Optional. A list of databases to
+ which the user should have access. When the database is set to
+ ``admin``"" , the user will have access to all databases based on
+ the user's role i.e. a user with the role ``readOnly`` assigned
+ to the ``admin`` database will have read access to all databases.
+ ],
+ "role": "str" # Optional. The role to assign to the
+ user with each role mapping to a MongoDB built-in role. ``readOnly``
+ maps to a `read
+ `_
+ role. ``readWrite`` maps to a `readWrite
+ `_
+ role. ``dbAdmin`` maps to a `dbAdmin
+ `_
+ role. Known values are: "readOnly", "readWrite", and "dbAdmin".
},
- "private_network_uuid": "str", # Optional. A string
- specifying the UUID of the VPC to which the read-only replica will be
- assigned. If excluded, the replica will be assigned to your account's
- default VPC for the region. :code:`
`:code:`
`Requires ``vpc:read``
- scope.
- "region": "str", # Optional. A slug identifier for the
- region where the read-only replica will be located. If excluded, the
- replica will be placed in the same region as the cluster.
- "size": "str", # Optional. A slug identifier representing
- the size of the node for the read-only replica. The size of the replica
- must be at least as large as the node size for the database cluster from
- which it is replicating.
- "status": "str", # Optional. A string representing the
- current status of the database cluster. Known values are: "creating",
- "online", "resizing", "migrating", and "forking".
- "storage_size_mib": 0, # Optional. Additional storage added
- to the cluster, in MiB. If null, no additional storage is added to the
- cluster, beyond what is provided as a base amount from the 'size' and any
- previously added additional storage.
- "tags": [
- "str" # Optional. A flat array of tag names as
- strings applied to the read-only
- replica.:code:`
`:code:`
`Requires ``tag:read`` scope.
- ]
+ "opensearch_acl": [
+ {
+ "index": "str", # Optional. A regex for
+ matching the indexes that this ACL should apply to.
+ "permission": "str" # Optional. Permission
+ set applied to the ACL. 'read' allows user to read from the
+ index. 'write' allows for user to write to the index. 'readwrite'
+ allows for both 'read' and 'write' permission. 'deny'(default)
+ restricts user from performing any operation over an index.
+ 'admin' allows for 'readwrite' as well as any operations to
+ administer the index. Known values are: "deny", "admin", "read",
+ "readwrite", and "write".
+ }
+ ],
+ "pg_allow_replication": bool # Optional. For Postgres
+ clusters, set to ``true`` for a user with replication rights. This option
+ is not currently supported for other database engines.
}
- ]
+ }
}
# response body for status code(s): 404
response == {
@@ -110041,13 +118293,28 @@ async def list_replicas(self, database_cluster_uuid: str, **kwargs: Any) -> JSON
}
error_map.update(kwargs.pop("error_map", {}) or {})
- _headers = kwargs.pop("headers", {}) or {}
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = kwargs.pop("params", {}) or {}
+ content_type: Optional[str] = kwargs.pop(
+ "content_type", _headers.pop("Content-Type", None)
+ )
cls: ClsType[JSON] = kwargs.pop("cls", None)
- _request = build_databases_list_replicas_request(
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = body
+
+ _request = build_databases_update_user_request(
database_cluster_uuid=database_cluster_uuid,
+ username=username,
+ content_type=content_type,
+ json=_json,
+ content=_content,
headers=_headers,
params=_params,
)
@@ -110062,14 +118329,14 @@ async def list_replicas(self, database_cluster_uuid: str, **kwargs: Any) -> JSON
response = pipeline_response.http_response
- if response.status_code not in [200, 404]:
+ if response.status_code not in [201, 404]:
if _stream:
await response.read() # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
raise HttpResponseError(response=response)
response_headers = {}
- if response.status_code == 200:
+ if response.status_code == 201:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
)
@@ -110107,31 +118374,33 @@ async def list_replicas(self, database_cluster_uuid: str, **kwargs: Any) -> JSON
return cast(JSON, deserialized) # type: ignore
@overload
- async def create_replica(
+ async def reset_auth(
self,
database_cluster_uuid: str,
- body: Optional[JSON] = None,
+ username: str,
+ body: JSON,
*,
content_type: str = "application/json",
**kwargs: Any
) -> JSON:
# pylint: disable=line-too-long
- """Create a Read-only Replica.
+ """Reset a Database User's Password or Authentication Method.
- To create a read-only replica for a PostgreSQL or MySQL database cluster, send a POST request
- to ``/v2/databases/$DATABASE_ID/replicas`` specifying the name it should be given, the size of
- the node to be used, and the region where it will be located.
+ To reset the password for a database user, send a POST request to
+ ``/v2/databases/$DATABASE_ID/users/$USERNAME/reset_auth``.
- **Note**\\ : Read-only replicas are not supported for Caching or Valkey clusters.
+ For ``mysql`` databases, the authentication method can be specifying by
+ including a key in the JSON body called ``mysql_settings`` with the ``auth_plugin``
+ value specified.
- The response will be a JSON object with a key called ``replica``. The value of this will be an
- object that contains the standard attributes associated with a database replica. The initial
- value of the read-only replica's ``status`` attribute will be ``forking``. When the replica is
- ready to receive traffic, this will transition to ``active``.
+ The response will be a JSON object with a ``user`` key. This will be set to an
+ object containing the standard database user attributes.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
- :param body: Default value is None.
+ :param username: The name of the database user. Required.
+ :type username: str
+ :param body: Required.
:type body: JSON
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
@@ -110145,159 +118414,91 @@ async def create_replica(
# JSON input template you can fill out and use as your body input.
body = {
- "name": "str", # The name to give the read-only replicating. Required.
- "connection": {
- "database": "str", # Optional. The name of the default database.
- "host": "str", # Optional. The FQDN pointing to the database
- cluster's current primary node.
- "password": "str", # Optional. The randomly generated password for
- the default user.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- "port": 0, # Optional. The port on which the database cluster is
- listening.
- "ssl": bool, # Optional. A boolean value indicating if the
- connection should be made over SSL.
- "uri": "str", # Optional. A connection string in the format accepted
- by the ``psql`` command. This is provided as a convenience and should be able
- to be constructed by the other attributes.
- "user": "str" # Optional. The default user for the
- database.:code:`
`:code:`
`Requires ``database:view_credentials``
- scope.
- },
- "created_at": "2020-02-20 00:00:00", # Optional. A time value given in
- ISO8601 combined date and time format that represents when the database cluster
- was created.
- "do_settings": {
- "service_cnames": [
- "str" # Optional. An array of custom CNAMEs for the database
- cluster. Each CNAME must be a valid RFC 1123 hostname (e.g.,
- "db.example.com"). Maximum of 16 CNAMEs allowed, each up to 253
- characters.
- ]
- },
- "id": "str", # Optional. A unique ID that can be used to identify and
- reference a database replica.
- "private_connection": {
- "database": "str", # Optional. The name of the default database.
- "host": "str", # Optional. The FQDN pointing to the database
- cluster's current primary node.
- "password": "str", # Optional. The randomly generated password for
- the default user.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- "port": 0, # Optional. The port on which the database cluster is
- listening.
- "ssl": bool, # Optional. A boolean value indicating if the
- connection should be made over SSL.
- "uri": "str", # Optional. A connection string in the format accepted
- by the ``psql`` command. This is provided as a convenience and should be able
- to be constructed by the other attributes.
- "user": "str" # Optional. The default user for the
- database.:code:`
`:code:`
`Requires ``database:view_credentials``
- scope.
- },
- "private_network_uuid": "str", # Optional. A string specifying the UUID of
- the VPC to which the read-only replica will be assigned. If excluded, the replica
- will be assigned to your account's default VPC for the region.
- :code:`
`:code:`
`Requires ``vpc:read`` scope.
- "region": "str", # Optional. A slug identifier for the region where the
- read-only replica will be located. If excluded, the replica will be placed in the
- same region as the cluster.
- "size": "str", # Optional. A slug identifier representing the size of the
- node for the read-only replica. The size of the replica must be at least as large
- as the node size for the database cluster from which it is replicating.
- "status": "str", # Optional. A string representing the current status of the
- database cluster. Known values are: "creating", "online", "resizing",
- "migrating", and "forking".
- "storage_size_mib": 0, # Optional. Additional storage added to the cluster,
- in MiB. If null, no additional storage is added to the cluster, beyond what is
- provided as a base amount from the 'size' and any previously added additional
- storage.
- "tags": [
- "str" # Optional. A flat array of tag names as strings to apply to
- the read-only replica after it is created. Tag names can either be existing
- or new tags. :code:`
`:code:`
`Requires ``tag:create`` scope.
- ]
+ "mysql_settings": {
+ "auth_plugin": "str" # A string specifying the authentication method
+ to be used for connections to the MySQL user account. The valid values are
+ ``mysql_native_password`` or ``caching_sha2_password``. If excluded when
+ creating a new user, the default for the version of MySQL in use will be
+ used. As of MySQL 8.0, the default is ``caching_sha2_password``. Required.
+ Known values are: "mysql_native_password" and "caching_sha2_password".
+ }
}
- # response body for status code(s): 201
+ # response body for status code(s): 200
response == {
- "replica": {
- "name": "str", # The name to give the read-only replicating.
- Required.
- "connection": {
- "database": "str", # Optional. The name of the default
- database.
- "host": "str", # Optional. The FQDN pointing to the database
- cluster's current primary node.
- "password": "str", # Optional. The randomly generated
- password for the default user.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- "port": 0, # Optional. The port on which the database
- cluster is listening.
- "ssl": bool, # Optional. A boolean value indicating if the
- connection should be made over SSL.
- "uri": "str", # Optional. A connection string in the format
- accepted by the ``psql`` command. This is provided as a convenience and
- should be able to be constructed by the other attributes.
- "user": "str" # Optional. The default user for the
- database.:code:`
`:code:`
`Requires ``database:view_credentials``
- scope.
- },
- "created_at": "2020-02-20 00:00:00", # Optional. A time value given
- in ISO8601 combined date and time format that represents when the database
- cluster was created.
- "do_settings": {
- "service_cnames": [
- "str" # Optional. An array of custom CNAMEs for the
- database cluster. Each CNAME must be a valid RFC 1123 hostname (e.g.,
- "db.example.com"). Maximum of 16 CNAMEs allowed, each up to 253
- characters.
- ]
- },
- "id": "str", # Optional. A unique ID that can be used to identify
- and reference a database replica.
- "private_connection": {
- "database": "str", # Optional. The name of the default
- database.
- "host": "str", # Optional. The FQDN pointing to the database
- cluster's current primary node.
- "password": "str", # Optional. The randomly generated
- password for the default user.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- "port": 0, # Optional. The port on which the database
- cluster is listening.
- "ssl": bool, # Optional. A boolean value indicating if the
- connection should be made over SSL.
- "uri": "str", # Optional. A connection string in the format
- accepted by the ``psql`` command. This is provided as a convenience and
- should be able to be constructed by the other attributes.
- "user": "str" # Optional. The default user for the
- database.:code:`
`:code:`
`Requires ``database:view_credentials``
- scope.
+ "user": {
+ "name": "str", # The name of a database user. Required.
+ "access_cert": "str", # Optional. Access certificate for TLS client
+ authentication. (Kafka only).
+ "access_key": "str", # Optional. Access key for TLS client
+ authentication. (Kafka only).
+ "mysql_settings": {
+ "auth_plugin": "str" # A string specifying the
+ authentication method to be used for connections to the MySQL user
+ account. The valid values are ``mysql_native_password`` or
+ ``caching_sha2_password``. If excluded when creating a new user, the
+ default for the version of MySQL in use will be used. As of MySQL 8.0,
+ the default is ``caching_sha2_password``. Required. Known values are:
+ "mysql_native_password" and "caching_sha2_password".
},
- "private_network_uuid": "str", # Optional. A string specifying the
- UUID of the VPC to which the read-only replica will be assigned. If excluded,
- the replica will be assigned to your account's default VPC for the region.
- :code:`
`:code:`
`Requires ``vpc:read`` scope.
- "region": "str", # Optional. A slug identifier for the region where
- the read-only replica will be located. If excluded, the replica will be
- placed in the same region as the cluster.
- "size": "str", # Optional. A slug identifier representing the size
- of the node for the read-only replica. The size of the replica must be at
- least as large as the node size for the database cluster from which it is
- replicating.
- "status": "str", # Optional. A string representing the current
- status of the database cluster. Known values are: "creating", "online",
- "resizing", "migrating", and "forking".
- "storage_size_mib": 0, # Optional. Additional storage added to the
- cluster, in MiB. If null, no additional storage is added to the cluster,
- beyond what is provided as a base amount from the 'size' and any previously
- added additional storage.
- "tags": [
- "str" # Optional. A flat array of tag names as strings
- applied to the read-only replica.:code:`
`:code:`
`Requires
- ``tag:read`` scope.
- ]
+ "password": "str", # Optional. A randomly generated password for the
+ database user.:code:`
`Requires ``database:view_credentials`` scope.
+ "role": "str", # Optional. A string representing the database user's
+ role. The value will be either "primary" or "normal". Known values are:
+ "primary" and "normal".
+ "settings": {
+ "acl": [
+ {
+ "permission": "str", # Permission set
+ applied to the ACL. 'consume' allows for messages to be consumed
+ from the topic. 'produce' allows for messages to be published to
+ the topic. 'produceconsume' allows for both 'consume' and
+ 'produce' permission. 'admin' allows for 'produceconsume' as well
+ as any operations to administer the topic (delete, update).
+ Required. Known values are: "admin", "consume", "produce", and
+ "produceconsume".
+ "topic": "str", # A regex for matching the
+ topic(s) that this ACL should apply to. Required.
+ "id": "str" # Optional. An identifier for
+ the ACL. Will be computed after the ACL is created/updated.
+ }
+ ],
+ "mongo_user_settings": {
+ "databases": [
+ "str" # Optional. A list of databases to
+ which the user should have access. When the database is set to
+ ``admin``"" , the user will have access to all databases based on
+ the user's role i.e. a user with the role ``readOnly`` assigned
+ to the ``admin`` database will have read access to all databases.
+ ],
+ "role": "str" # Optional. The role to assign to the
+ user with each role mapping to a MongoDB built-in role. ``readOnly``
+ maps to a `read
+ `_
+ role. ``readWrite`` maps to a `readWrite
+ `_
+ role. ``dbAdmin`` maps to a `dbAdmin
+ `_
+ role. Known values are: "readOnly", "readWrite", and "dbAdmin".
+ },
+ "opensearch_acl": [
+ {
+ "index": "str", # Optional. A regex for
+ matching the indexes that this ACL should apply to.
+ "permission": "str" # Optional. Permission
+ set applied to the ACL. 'read' allows user to read from the
+ index. 'write' allows for user to write to the index. 'readwrite'
+ allows for both 'read' and 'write' permission. 'deny'(default)
+ restricts user from performing any operation over an index.
+ 'admin' allows for 'readwrite' as well as any operations to
+ administer the index. Known values are: "deny", "admin", "read",
+ "readwrite", and "write".
+ }
+ ],
+ "pg_allow_replication": bool # Optional. For Postgres
+ clusters, set to ``true`` for a user with replication rights. This option
+ is not currently supported for other database engines.
+ }
}
}
# response body for status code(s): 404
@@ -110314,31 +118515,33 @@ async def create_replica(
"""
@overload
- async def create_replica(
+ async def reset_auth(
self,
database_cluster_uuid: str,
- body: Optional[IO[bytes]] = None,
+ username: str,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
) -> JSON:
# pylint: disable=line-too-long
- """Create a Read-only Replica.
-
- To create a read-only replica for a PostgreSQL or MySQL database cluster, send a POST request
- to ``/v2/databases/$DATABASE_ID/replicas`` specifying the name it should be given, the size of
- the node to be used, and the region where it will be located.
+ """Reset a Database User's Password or Authentication Method.
- **Note**\\ : Read-only replicas are not supported for Caching or Valkey clusters.
+ To reset the password for a database user, send a POST request to
+ ``/v2/databases/$DATABASE_ID/users/$USERNAME/reset_auth``.
- The response will be a JSON object with a key called ``replica``. The value of this will be an
- object that contains the standard attributes associated with a database replica. The initial
- value of the read-only replica's ``status`` attribute will be ``forking``. When the replica is
- ready to receive traffic, this will transition to ``active``.
+ For ``mysql`` databases, the authentication method can be specifying by
+ including a key in the JSON body called ``mysql_settings`` with the ``auth_plugin``
+ value specified.
+
+ The response will be a JSON object with a ``user`` key. This will be set to an
+ object containing the standard database user attributes.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
- :param body: Default value is None.
+ :param username: The name of the database user. Required.
+ :type username: str
+ :param body: Required.
:type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
@@ -110350,85 +118553,81 @@ async def create_replica(
Example:
.. code-block:: python
- # response body for status code(s): 201
+ # response body for status code(s): 200
response == {
- "replica": {
- "name": "str", # The name to give the read-only replicating.
- Required.
- "connection": {
- "database": "str", # Optional. The name of the default
- database.
- "host": "str", # Optional. The FQDN pointing to the database
- cluster's current primary node.
- "password": "str", # Optional. The randomly generated
- password for the default user.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- "port": 0, # Optional. The port on which the database
- cluster is listening.
- "ssl": bool, # Optional. A boolean value indicating if the
- connection should be made over SSL.
- "uri": "str", # Optional. A connection string in the format
- accepted by the ``psql`` command. This is provided as a convenience and
- should be able to be constructed by the other attributes.
- "user": "str" # Optional. The default user for the
- database.:code:`
`:code:`
`Requires ``database:view_credentials``
- scope.
- },
- "created_at": "2020-02-20 00:00:00", # Optional. A time value given
- in ISO8601 combined date and time format that represents when the database
- cluster was created.
- "do_settings": {
- "service_cnames": [
- "str" # Optional. An array of custom CNAMEs for the
- database cluster. Each CNAME must be a valid RFC 1123 hostname (e.g.,
- "db.example.com"). Maximum of 16 CNAMEs allowed, each up to 253
- characters.
- ]
- },
- "id": "str", # Optional. A unique ID that can be used to identify
- and reference a database replica.
- "private_connection": {
- "database": "str", # Optional. The name of the default
- database.
- "host": "str", # Optional. The FQDN pointing to the database
- cluster's current primary node.
- "password": "str", # Optional. The randomly generated
- password for the default user.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- "port": 0, # Optional. The port on which the database
- cluster is listening.
- "ssl": bool, # Optional. A boolean value indicating if the
- connection should be made over SSL.
- "uri": "str", # Optional. A connection string in the format
- accepted by the ``psql`` command. This is provided as a convenience and
- should be able to be constructed by the other attributes.
- "user": "str" # Optional. The default user for the
- database.:code:`
`:code:`
`Requires ``database:view_credentials``
- scope.
+ "user": {
+ "name": "str", # The name of a database user. Required.
+ "access_cert": "str", # Optional. Access certificate for TLS client
+ authentication. (Kafka only).
+ "access_key": "str", # Optional. Access key for TLS client
+ authentication. (Kafka only).
+ "mysql_settings": {
+ "auth_plugin": "str" # A string specifying the
+ authentication method to be used for connections to the MySQL user
+ account. The valid values are ``mysql_native_password`` or
+ ``caching_sha2_password``. If excluded when creating a new user, the
+ default for the version of MySQL in use will be used. As of MySQL 8.0,
+ the default is ``caching_sha2_password``. Required. Known values are:
+ "mysql_native_password" and "caching_sha2_password".
},
- "private_network_uuid": "str", # Optional. A string specifying the
- UUID of the VPC to which the read-only replica will be assigned. If excluded,
- the replica will be assigned to your account's default VPC for the region.
- :code:`
`:code:`
`Requires ``vpc:read`` scope.
- "region": "str", # Optional. A slug identifier for the region where
- the read-only replica will be located. If excluded, the replica will be
- placed in the same region as the cluster.
- "size": "str", # Optional. A slug identifier representing the size
- of the node for the read-only replica. The size of the replica must be at
- least as large as the node size for the database cluster from which it is
- replicating.
- "status": "str", # Optional. A string representing the current
- status of the database cluster. Known values are: "creating", "online",
- "resizing", "migrating", and "forking".
- "storage_size_mib": 0, # Optional. Additional storage added to the
- cluster, in MiB. If null, no additional storage is added to the cluster,
- beyond what is provided as a base amount from the 'size' and any previously
- added additional storage.
- "tags": [
- "str" # Optional. A flat array of tag names as strings
- applied to the read-only replica.:code:`
`:code:`
`Requires
- ``tag:read`` scope.
- ]
+ "password": "str", # Optional. A randomly generated password for the
+ database user.:code:`
`Requires ``database:view_credentials`` scope.
+ "role": "str", # Optional. A string representing the database user's
+ role. The value will be either "primary" or "normal". Known values are:
+ "primary" and "normal".
+ "settings": {
+ "acl": [
+ {
+ "permission": "str", # Permission set
+ applied to the ACL. 'consume' allows for messages to be consumed
+ from the topic. 'produce' allows for messages to be published to
+ the topic. 'produceconsume' allows for both 'consume' and
+ 'produce' permission. 'admin' allows for 'produceconsume' as well
+ as any operations to administer the topic (delete, update).
+ Required. Known values are: "admin", "consume", "produce", and
+ "produceconsume".
+ "topic": "str", # A regex for matching the
+ topic(s) that this ACL should apply to. Required.
+ "id": "str" # Optional. An identifier for
+ the ACL. Will be computed after the ACL is created/updated.
+ }
+ ],
+ "mongo_user_settings": {
+ "databases": [
+ "str" # Optional. A list of databases to
+ which the user should have access. When the database is set to
+ ``admin``"" , the user will have access to all databases based on
+ the user's role i.e. a user with the role ``readOnly`` assigned
+ to the ``admin`` database will have read access to all databases.
+ ],
+ "role": "str" # Optional. The role to assign to the
+ user with each role mapping to a MongoDB built-in role. ``readOnly``
+ maps to a `read
+ `_
+ role. ``readWrite`` maps to a `readWrite
+ `_
+ role. ``dbAdmin`` maps to a `dbAdmin
+ `_
+ role. Known values are: "readOnly", "readWrite", and "dbAdmin".
+ },
+ "opensearch_acl": [
+ {
+ "index": "str", # Optional. A regex for
+ matching the indexes that this ACL should apply to.
+ "permission": "str" # Optional. Permission
+ set applied to the ACL. 'read' allows user to read from the
+ index. 'write' allows for user to write to the index. 'readwrite'
+ allows for both 'read' and 'write' permission. 'deny'(default)
+ restricts user from performing any operation over an index.
+ 'admin' allows for 'readwrite' as well as any operations to
+ administer the index. Known values are: "deny", "admin", "read",
+ "readwrite", and "write".
+ }
+ ],
+ "pg_allow_replication": bool # Optional. For Postgres
+ clusters, set to ``true`` for a user with replication rights. This option
+ is not currently supported for other database engines.
+ }
}
}
# response body for status code(s): 404
@@ -110445,29 +118644,31 @@ async def create_replica(
"""
@distributed_trace_async
- async def create_replica(
+ async def reset_auth(
self,
database_cluster_uuid: str,
- body: Optional[Union[JSON, IO[bytes]]] = None,
+ username: str,
+ body: Union[JSON, IO[bytes]],
**kwargs: Any
) -> JSON:
# pylint: disable=line-too-long
- """Create a Read-only Replica.
+ """Reset a Database User's Password or Authentication Method.
- To create a read-only replica for a PostgreSQL or MySQL database cluster, send a POST request
- to ``/v2/databases/$DATABASE_ID/replicas`` specifying the name it should be given, the size of
- the node to be used, and the region where it will be located.
+ To reset the password for a database user, send a POST request to
+ ``/v2/databases/$DATABASE_ID/users/$USERNAME/reset_auth``.
- **Note**\\ : Read-only replicas are not supported for Caching or Valkey clusters.
+ For ``mysql`` databases, the authentication method can be specifying by
+ including a key in the JSON body called ``mysql_settings`` with the ``auth_plugin``
+ value specified.
- The response will be a JSON object with a key called ``replica``. The value of this will be an
- object that contains the standard attributes associated with a database replica. The initial
- value of the read-only replica's ``status`` attribute will be ``forking``. When the replica is
- ready to receive traffic, this will transition to ``active``.
+ The response will be a JSON object with a ``user`` key. This will be set to an
+ object containing the standard database user attributes.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
- :param body: Is either a JSON type or a IO[bytes] type. Default value is None.
+ :param username: The name of the database user. Required.
+ :type username: str
+ :param body: Is either a JSON type or a IO[bytes] type. Required.
:type body: JSON or IO[bytes]
:return: JSON object
:rtype: JSON
@@ -110478,159 +118679,91 @@ async def create_replica(
# JSON input template you can fill out and use as your body input.
body = {
- "name": "str", # The name to give the read-only replicating. Required.
- "connection": {
- "database": "str", # Optional. The name of the default database.
- "host": "str", # Optional. The FQDN pointing to the database
- cluster's current primary node.
- "password": "str", # Optional. The randomly generated password for
- the default user.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- "port": 0, # Optional. The port on which the database cluster is
- listening.
- "ssl": bool, # Optional. A boolean value indicating if the
- connection should be made over SSL.
- "uri": "str", # Optional. A connection string in the format accepted
- by the ``psql`` command. This is provided as a convenience and should be able
- to be constructed by the other attributes.
- "user": "str" # Optional. The default user for the
- database.:code:`
`:code:`
`Requires ``database:view_credentials``
- scope.
- },
- "created_at": "2020-02-20 00:00:00", # Optional. A time value given in
- ISO8601 combined date and time format that represents when the database cluster
- was created.
- "do_settings": {
- "service_cnames": [
- "str" # Optional. An array of custom CNAMEs for the database
- cluster. Each CNAME must be a valid RFC 1123 hostname (e.g.,
- "db.example.com"). Maximum of 16 CNAMEs allowed, each up to 253
- characters.
- ]
- },
- "id": "str", # Optional. A unique ID that can be used to identify and
- reference a database replica.
- "private_connection": {
- "database": "str", # Optional. The name of the default database.
- "host": "str", # Optional. The FQDN pointing to the database
- cluster's current primary node.
- "password": "str", # Optional. The randomly generated password for
- the default user.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- "port": 0, # Optional. The port on which the database cluster is
- listening.
- "ssl": bool, # Optional. A boolean value indicating if the
- connection should be made over SSL.
- "uri": "str", # Optional. A connection string in the format accepted
- by the ``psql`` command. This is provided as a convenience and should be able
- to be constructed by the other attributes.
- "user": "str" # Optional. The default user for the
- database.:code:`
`:code:`
`Requires ``database:view_credentials``
- scope.
- },
- "private_network_uuid": "str", # Optional. A string specifying the UUID of
- the VPC to which the read-only replica will be assigned. If excluded, the replica
- will be assigned to your account's default VPC for the region.
- :code:`
`:code:`
`Requires ``vpc:read`` scope.
- "region": "str", # Optional. A slug identifier for the region where the
- read-only replica will be located. If excluded, the replica will be placed in the
- same region as the cluster.
- "size": "str", # Optional. A slug identifier representing the size of the
- node for the read-only replica. The size of the replica must be at least as large
- as the node size for the database cluster from which it is replicating.
- "status": "str", # Optional. A string representing the current status of the
- database cluster. Known values are: "creating", "online", "resizing",
- "migrating", and "forking".
- "storage_size_mib": 0, # Optional. Additional storage added to the cluster,
- in MiB. If null, no additional storage is added to the cluster, beyond what is
- provided as a base amount from the 'size' and any previously added additional
- storage.
- "tags": [
- "str" # Optional. A flat array of tag names as strings to apply to
- the read-only replica after it is created. Tag names can either be existing
- or new tags. :code:`
`:code:`
`Requires ``tag:create`` scope.
- ]
+ "mysql_settings": {
+ "auth_plugin": "str" # A string specifying the authentication method
+ to be used for connections to the MySQL user account. The valid values are
+ ``mysql_native_password`` or ``caching_sha2_password``. If excluded when
+ creating a new user, the default for the version of MySQL in use will be
+ used. As of MySQL 8.0, the default is ``caching_sha2_password``. Required.
+ Known values are: "mysql_native_password" and "caching_sha2_password".
+ }
}
- # response body for status code(s): 201
+ # response body for status code(s): 200
response == {
- "replica": {
- "name": "str", # The name to give the read-only replicating.
- Required.
- "connection": {
- "database": "str", # Optional. The name of the default
- database.
- "host": "str", # Optional. The FQDN pointing to the database
- cluster's current primary node.
- "password": "str", # Optional. The randomly generated
- password for the default user.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- "port": 0, # Optional. The port on which the database
- cluster is listening.
- "ssl": bool, # Optional. A boolean value indicating if the
- connection should be made over SSL.
- "uri": "str", # Optional. A connection string in the format
- accepted by the ``psql`` command. This is provided as a convenience and
- should be able to be constructed by the other attributes.
- "user": "str" # Optional. The default user for the
- database.:code:`
`:code:`
`Requires ``database:view_credentials``
- scope.
- },
- "created_at": "2020-02-20 00:00:00", # Optional. A time value given
- in ISO8601 combined date and time format that represents when the database
- cluster was created.
- "do_settings": {
- "service_cnames": [
- "str" # Optional. An array of custom CNAMEs for the
- database cluster. Each CNAME must be a valid RFC 1123 hostname (e.g.,
- "db.example.com"). Maximum of 16 CNAMEs allowed, each up to 253
- characters.
- ]
- },
- "id": "str", # Optional. A unique ID that can be used to identify
- and reference a database replica.
- "private_connection": {
- "database": "str", # Optional. The name of the default
- database.
- "host": "str", # Optional. The FQDN pointing to the database
- cluster's current primary node.
- "password": "str", # Optional. The randomly generated
- password for the default user.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- "port": 0, # Optional. The port on which the database
- cluster is listening.
- "ssl": bool, # Optional. A boolean value indicating if the
- connection should be made over SSL.
- "uri": "str", # Optional. A connection string in the format
- accepted by the ``psql`` command. This is provided as a convenience and
- should be able to be constructed by the other attributes.
- "user": "str" # Optional. The default user for the
- database.:code:`
`:code:`
`Requires ``database:view_credentials``
- scope.
+ "user": {
+ "name": "str", # The name of a database user. Required.
+ "access_cert": "str", # Optional. Access certificate for TLS client
+ authentication. (Kafka only).
+ "access_key": "str", # Optional. Access key for TLS client
+ authentication. (Kafka only).
+ "mysql_settings": {
+ "auth_plugin": "str" # A string specifying the
+ authentication method to be used for connections to the MySQL user
+ account. The valid values are ``mysql_native_password`` or
+ ``caching_sha2_password``. If excluded when creating a new user, the
+ default for the version of MySQL in use will be used. As of MySQL 8.0,
+ the default is ``caching_sha2_password``. Required. Known values are:
+ "mysql_native_password" and "caching_sha2_password".
},
- "private_network_uuid": "str", # Optional. A string specifying the
- UUID of the VPC to which the read-only replica will be assigned. If excluded,
- the replica will be assigned to your account's default VPC for the region.
- :code:`
`:code:`
`Requires ``vpc:read`` scope.
- "region": "str", # Optional. A slug identifier for the region where
- the read-only replica will be located. If excluded, the replica will be
- placed in the same region as the cluster.
- "size": "str", # Optional. A slug identifier representing the size
- of the node for the read-only replica. The size of the replica must be at
- least as large as the node size for the database cluster from which it is
- replicating.
- "status": "str", # Optional. A string representing the current
- status of the database cluster. Known values are: "creating", "online",
- "resizing", "migrating", and "forking".
- "storage_size_mib": 0, # Optional. Additional storage added to the
- cluster, in MiB. If null, no additional storage is added to the cluster,
- beyond what is provided as a base amount from the 'size' and any previously
- added additional storage.
- "tags": [
- "str" # Optional. A flat array of tag names as strings
- applied to the read-only replica.:code:`
`:code:`
`Requires
- ``tag:read`` scope.
- ]
+ "password": "str", # Optional. A randomly generated password for the
+ database user.:code:`
`Requires ``database:view_credentials`` scope.
+ "role": "str", # Optional. A string representing the database user's
+ role. The value will be either "primary" or "normal". Known values are:
+ "primary" and "normal".
+ "settings": {
+ "acl": [
+ {
+ "permission": "str", # Permission set
+ applied to the ACL. 'consume' allows for messages to be consumed
+ from the topic. 'produce' allows for messages to be published to
+ the topic. 'produceconsume' allows for both 'consume' and
+ 'produce' permission. 'admin' allows for 'produceconsume' as well
+ as any operations to administer the topic (delete, update).
+ Required. Known values are: "admin", "consume", "produce", and
+ "produceconsume".
+ "topic": "str", # A regex for matching the
+ topic(s) that this ACL should apply to. Required.
+ "id": "str" # Optional. An identifier for
+ the ACL. Will be computed after the ACL is created/updated.
+ }
+ ],
+ "mongo_user_settings": {
+ "databases": [
+ "str" # Optional. A list of databases to
+ which the user should have access. When the database is set to
+ ``admin``"" , the user will have access to all databases based on
+ the user's role i.e. a user with the role ``readOnly`` assigned
+ to the ``admin`` database will have read access to all databases.
+ ],
+ "role": "str" # Optional. The role to assign to the
+ user with each role mapping to a MongoDB built-in role. ``readOnly``
+ maps to a `read
+ `_
+ role. ``readWrite`` maps to a `readWrite
+ `_
+ role. ``dbAdmin`` maps to a `dbAdmin
+ `_
+ role. Known values are: "readOnly", "readWrite", and "dbAdmin".
+ },
+ "opensearch_acl": [
+ {
+ "index": "str", # Optional. A regex for
+ matching the indexes that this ACL should apply to.
+ "permission": "str" # Optional. Permission
+ set applied to the ACL. 'read' allows user to read from the
+ index. 'write' allows for user to write to the index. 'readwrite'
+ allows for both 'read' and 'write' permission. 'deny'(default)
+ restricts user from performing any operation over an index.
+ 'admin' allows for 'readwrite' as well as any operations to
+ administer the index. Known values are: "deny", "admin", "read",
+ "readwrite", and "write".
+ }
+ ],
+ "pg_allow_replication": bool # Optional. For Postgres
+ clusters, set to ``true`` for a user with replication rights. This option
+ is not currently supported for other database engines.
+ }
}
}
# response body for status code(s): 404
@@ -110672,13 +118805,11 @@ async def create_replica(
if isinstance(body, (IOBase, bytes)):
_content = body
else:
- if body is not None:
- _json = body
- else:
- _json = None
+ _json = body
- _request = build_databases_create_replica_request(
+ _request = build_databases_reset_auth_request(
database_cluster_uuid=database_cluster_uuid,
+ username=username,
content_type=content_type,
json=_json,
content=_content,
@@ -110696,14 +118827,14 @@ async def create_replica(
response = pipeline_response.http_response
- if response.status_code not in [201, 404]:
+ if response.status_code not in [200, 404]:
if _stream:
await response.read() # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
raise HttpResponseError(response=response)
response_headers = {}
- if response.status_code == 201:
+ if response.status_code == 200:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
)
@@ -110741,14 +118872,17 @@ async def create_replica(
return cast(JSON, deserialized) # type: ignore
@distributed_trace_async
- async def list_events_logs(self, database_cluster_uuid: str, **kwargs: Any) -> JSON:
+ async def list(self, database_cluster_uuid: str, **kwargs: Any) -> JSON:
# pylint: disable=line-too-long
- """List all Events Logs.
+ """List All Databases.
- To list all of the cluster events, send a GET request to
- ``/v2/databases/$DATABASE_ID/events``.
+ To list all of the databases in a clusters, send a GET request to
+ ``/v2/databases/$DATABASE_ID/dbs``.
- The result will be a JSON object with a ``events`` key.
+ The result will be a JSON object with a ``dbs`` key. This will be set to an array
+ of database objects, each of which will contain the standard database attributes.
+
+ Note: Database management is not supported for Caching or Valkey clusters.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
@@ -110761,16 +118895,9 @@ async def list_events_logs(self, database_cluster_uuid: str, **kwargs: Any) -> J
# response body for status code(s): 200
response == {
- "events": [
+ "dbs": [
{
- "cluster_name": "str", # Optional. The name of cluster.
- "create_time": "str", # Optional. The time of the generation
- of a event.
- "event_type": "str", # Optional. Type of the event. Known
- values are: "cluster_maintenance_perform", "cluster_master_promotion",
- "cluster_create", "cluster_update", "cluster_delete", "cluster_poweron",
- and "cluster_poweroff".
- "id": "str" # Optional. ID of the particular event.
+ "name": "str" # The name of the database. Required.
}
]
}
@@ -110804,7 +118931,7 @@ async def list_events_logs(self, database_cluster_uuid: str, **kwargs: Any) -> J
cls: ClsType[JSON] = kwargs.pop("cls", None)
- _request = build_databases_list_events_logs_request(
+ _request = build_databases_list_request(
database_cluster_uuid=database_cluster_uuid,
headers=_headers,
params=_params,
@@ -110864,25 +118991,136 @@ async def list_events_logs(self, database_cluster_uuid: str, **kwargs: Any) -> J
return cast(JSON, deserialized) # type: ignore
+ @overload
+ async def add(
+ self,
+ database_cluster_uuid: str,
+ body: JSON,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> JSON:
+ # pylint: disable=line-too-long
+ """Add a New Database.
+
+ To add a new database to an existing cluster, send a POST request to
+ ``/v2/databases/$DATABASE_ID/dbs``.
+
+ Note: Database management is not supported for Caching or Valkey clusters.
+
+ The response will be a JSON object with a key called ``db``. The value of this will be
+ an object that contains the standard attributes associated with a database.
+
+ :param database_cluster_uuid: A unique identifier for a database cluster. Required.
+ :type database_cluster_uuid: str
+ :param body: Required.
+ :type body: JSON
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: JSON object
+ :rtype: JSON
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # JSON input template you can fill out and use as your body input.
+ body = {
+ "name": "str" # The name of the database. Required.
+ }
+
+ # response body for status code(s): 201
+ response == {
+ "db": {
+ "name": "str" # The name of the database. Required.
+ }
+ }
+ # response body for status code(s): 404
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
+ """
+
+ @overload
+ async def add(
+ self,
+ database_cluster_uuid: str,
+ body: IO[bytes],
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> JSON:
+ # pylint: disable=line-too-long
+ """Add a New Database.
+
+ To add a new database to an existing cluster, send a POST request to
+ ``/v2/databases/$DATABASE_ID/dbs``.
+
+ Note: Database management is not supported for Caching or Valkey clusters.
+
+ The response will be a JSON object with a key called ``db``. The value of this will be
+ an object that contains the standard attributes associated with a database.
+
+ :param database_cluster_uuid: A unique identifier for a database cluster. Required.
+ :type database_cluster_uuid: str
+ :param body: Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: JSON object
+ :rtype: JSON
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # response body for status code(s): 201
+ response == {
+ "db": {
+ "name": "str" # The name of the database. Required.
+ }
+ }
+ # response body for status code(s): 404
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
+ """
+
@distributed_trace_async
- async def get_replica(
- self, database_cluster_uuid: str, replica_name: str, **kwargs: Any
+ async def add(
+ self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any
) -> JSON:
# pylint: disable=line-too-long
- """Retrieve an Existing Read-only Replica.
+ """Add a New Database.
- To show information about an existing database replica, send a GET request to
- ``/v2/databases/$DATABASE_ID/replicas/$REPLICA_NAME``.
+ To add a new database to an existing cluster, send a POST request to
+ ``/v2/databases/$DATABASE_ID/dbs``.
- **Note**\\ : Read-only replicas are not supported for Caching or Valkey clusters.
+ Note: Database management is not supported for Caching or Valkey clusters.
- The response will be a JSON object with a ``replica key``. This will be set to an object
- containing the standard database replica attributes.
+ The response will be a JSON object with a key called ``db``. The value of this will be
+ an object that contains the standard attributes associated with a database.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
- :param replica_name: The name of the database replica. Required.
- :type replica_name: str
+ :param body: Is either a JSON type or a IO[bytes] type. Required.
+ :type body: JSON or IO[bytes]
:return: JSON object
:rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
@@ -110890,85 +119128,15 @@ async def get_replica(
Example:
.. code-block:: python
- # response body for status code(s): 200
+ # JSON input template you can fill out and use as your body input.
+ body = {
+ "name": "str" # The name of the database. Required.
+ }
+
+ # response body for status code(s): 201
response == {
- "replica": {
- "name": "str", # The name to give the read-only replicating.
- Required.
- "connection": {
- "database": "str", # Optional. The name of the default
- database.
- "host": "str", # Optional. The FQDN pointing to the database
- cluster's current primary node.
- "password": "str", # Optional. The randomly generated
- password for the default user.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- "port": 0, # Optional. The port on which the database
- cluster is listening.
- "ssl": bool, # Optional. A boolean value indicating if the
- connection should be made over SSL.
- "uri": "str", # Optional. A connection string in the format
- accepted by the ``psql`` command. This is provided as a convenience and
- should be able to be constructed by the other attributes.
- "user": "str" # Optional. The default user for the
- database.:code:`
`:code:`
`Requires ``database:view_credentials``
- scope.
- },
- "created_at": "2020-02-20 00:00:00", # Optional. A time value given
- in ISO8601 combined date and time format that represents when the database
- cluster was created.
- "do_settings": {
- "service_cnames": [
- "str" # Optional. An array of custom CNAMEs for the
- database cluster. Each CNAME must be a valid RFC 1123 hostname (e.g.,
- "db.example.com"). Maximum of 16 CNAMEs allowed, each up to 253
- characters.
- ]
- },
- "id": "str", # Optional. A unique ID that can be used to identify
- and reference a database replica.
- "private_connection": {
- "database": "str", # Optional. The name of the default
- database.
- "host": "str", # Optional. The FQDN pointing to the database
- cluster's current primary node.
- "password": "str", # Optional. The randomly generated
- password for the default user.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- "port": 0, # Optional. The port on which the database
- cluster is listening.
- "ssl": bool, # Optional. A boolean value indicating if the
- connection should be made over SSL.
- "uri": "str", # Optional. A connection string in the format
- accepted by the ``psql`` command. This is provided as a convenience and
- should be able to be constructed by the other attributes.
- "user": "str" # Optional. The default user for the
- database.:code:`
`:code:`
`Requires ``database:view_credentials``
- scope.
- },
- "private_network_uuid": "str", # Optional. A string specifying the
- UUID of the VPC to which the read-only replica will be assigned. If excluded,
- the replica will be assigned to your account's default VPC for the region.
- :code:`
`:code:`
`Requires ``vpc:read`` scope.
- "region": "str", # Optional. A slug identifier for the region where
- the read-only replica will be located. If excluded, the replica will be
- placed in the same region as the cluster.
- "size": "str", # Optional. A slug identifier representing the size
- of the node for the read-only replica. The size of the replica must be at
- least as large as the node size for the database cluster from which it is
- replicating.
- "status": "str", # Optional. A string representing the current
- status of the database cluster. Known values are: "creating", "online",
- "resizing", "migrating", and "forking".
- "storage_size_mib": 0, # Optional. Additional storage added to the
- cluster, in MiB. If null, no additional storage is added to the cluster,
- beyond what is provided as a base amount from the 'size' and any previously
- added additional storage.
- "tags": [
- "str" # Optional. A flat array of tag names as strings
- applied to the read-only replica.:code:`
`:code:`
`Requires
- ``tag:read`` scope.
- ]
+ "db": {
+ "name": "str" # The name of the database. Required.
}
}
# response body for status code(s): 404
@@ -110996,14 +119164,27 @@ async def get_replica(
}
error_map.update(kwargs.pop("error_map", {}) or {})
- _headers = kwargs.pop("headers", {}) or {}
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = kwargs.pop("params", {}) or {}
+ content_type: Optional[str] = kwargs.pop(
+ "content_type", _headers.pop("Content-Type", None)
+ )
cls: ClsType[JSON] = kwargs.pop("cls", None)
- _request = build_databases_get_replica_request(
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = body
+
+ _request = build_databases_add_request(
database_cluster_uuid=database_cluster_uuid,
- replica_name=replica_name,
+ content_type=content_type,
+ json=_json,
+ content=_content,
headers=_headers,
params=_params,
)
@@ -111018,14 +119199,14 @@ async def get_replica(
response = pipeline_response.http_response
- if response.status_code not in [200, 404]:
+ if response.status_code not in [201, 404]:
if _stream:
await response.read() # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
raise HttpResponseError(response=response)
response_headers = {}
- if response.status_code == 200:
+ if response.status_code == 201:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
)
@@ -111063,31 +119244,37 @@ async def get_replica(
return cast(JSON, deserialized) # type: ignore
@distributed_trace_async
- async def destroy_replica(
- self, database_cluster_uuid: str, replica_name: str, **kwargs: Any
- ) -> Optional[JSON]:
+ async def get(
+ self, database_cluster_uuid: str, database_name: str, **kwargs: Any
+ ) -> JSON:
# pylint: disable=line-too-long
- """Destroy a Read-only Replica.
+ """Retrieve an Existing Database.
- To destroy a specific read-only replica, send a DELETE request to
- ``/v2/databases/$DATABASE_ID/replicas/$REPLICA_NAME``.
+ To show information about an existing database cluster, send a GET request to
+ ``/v2/databases/$DATABASE_ID/dbs/$DB_NAME``.
- **Note**\\ : Read-only replicas are not supported for Caching or Valkey clusters.
+ Note: Database management is not supported for Caching or Valkey clusters.
- A status of 204 will be given. This indicates that the request was processed successfully, but
- that no response body is needed.
+ The response will be a JSON object with a ``db`` key. This will be set to an object
+ containing the standard database attributes.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
- :param replica_name: The name of the database replica. Required.
- :type replica_name: str
- :return: JSON object or None
- :rtype: JSON or None
+ :param database_name: The name of the database. Required.
+ :type database_name: str
+ :return: JSON object
+ :rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
+ # response body for status code(s): 200
+ response == {
+ "db": {
+ "name": "str" # The name of the database. Required.
+ }
+ }
# response body for status code(s): 404
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
@@ -111116,11 +119303,11 @@ async def destroy_replica(
_headers = kwargs.pop("headers", {}) or {}
_params = kwargs.pop("params", {}) or {}
- cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None)
+ cls: ClsType[JSON] = kwargs.pop("cls", None)
- _request = build_databases_destroy_replica_request(
+ _request = build_databases_get_request(
database_cluster_uuid=database_cluster_uuid,
- replica_name=replica_name,
+ database_name=database_name,
headers=_headers,
params=_params,
)
@@ -111135,15 +119322,14 @@ async def destroy_replica(
response = pipeline_response.http_response
- if response.status_code not in [204, 404]:
+ if response.status_code not in [200, 404]:
if _stream:
await response.read() # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
raise HttpResponseError(response=response)
- deserialized = None
response_headers = {}
- if response.status_code == 204:
+ if response.status_code == 200:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
)
@@ -111154,6 +119340,11 @@ async def destroy_replica(
"int", response.headers.get("ratelimit-reset")
)
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
if response.status_code == 404:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
@@ -111171,29 +119362,29 @@ async def destroy_replica(
deserialized = None
if cls:
- return cls(pipeline_response, deserialized, response_headers) # type: ignore
+ return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
- return deserialized # type: ignore
+ return cast(JSON, deserialized) # type: ignore
@distributed_trace_async
- async def promote_replica(
- self, database_cluster_uuid: str, replica_name: str, **kwargs: Any
+ async def delete(
+ self, database_cluster_uuid: str, database_name: str, **kwargs: Any
) -> Optional[JSON]:
# pylint: disable=line-too-long
- """Promote a Read-only Replica to become a Primary Cluster.
+ """Delete a Database.
- To promote a specific read-only replica, send a PUT request to
- ``/v2/databases/$DATABASE_ID/replicas/$REPLICA_NAME/promote``.
+ To delete a specific database, send a DELETE request to
+ ``/v2/databases/$DATABASE_ID/dbs/$DB_NAME``.
- **Note**\\ : Read-only replicas are not supported for Caching or Valkey clusters.
+ A status of 204 will be given. This indicates that the request was processed
+ successfully, but that no response body is needed.
- A status of 204 will be given. This indicates that the request was processed successfully, but
- that no response body is needed.
+ Note: Database management is not supported for Caching or Valkey clusters.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
- :param replica_name: The name of the database replica. Required.
- :type replica_name: str
+ :param database_name: The name of the database. Required.
+ :type database_name: str
:return: JSON object or None
:rtype: JSON or None
:raises ~azure.core.exceptions.HttpResponseError:
@@ -111231,9 +119422,9 @@ async def promote_replica(
cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None)
- _request = build_databases_promote_replica_request(
+ _request = build_databases_delete_request(
database_cluster_uuid=database_cluster_uuid,
- replica_name=replica_name,
+ database_name=database_name,
headers=_headers,
params=_params,
)
@@ -111289,23 +119480,16 @@ async def promote_replica(
return deserialized # type: ignore
@distributed_trace_async
- async def list_users(self, database_cluster_uuid: str, **kwargs: Any) -> JSON:
+ async def list_connection_pools(
+ self, database_cluster_uuid: str, **kwargs: Any
+ ) -> JSON:
# pylint: disable=line-too-long
- """List all Database Users.
-
- To list all of the users for your database cluster, send a GET request to
- ``/v2/databases/$DATABASE_ID/users``.
-
- Note: User management is not supported for Caching or Valkey clusters.
-
- The result will be a JSON object with a ``users`` key. This will be set to an array
- of database user objects, each of which will contain the standard database user attributes.
- User passwords will not show without the ``database:view_credentials`` scope.
-
- For MySQL clusters, additional options will be contained in the mysql_settings object.
+ """List Connection Pools (PostgreSQL).
- For MongoDB clusters, additional information will be contained in the mongo_user_settings
- object.
+ To list all of the connection pools available to a PostgreSQL database cluster, send a GET
+ request to ``/v2/databases/$DATABASE_ID/pools``.
+ The result will be a JSON object with a ``pools`` key. This will be set to an array of
+ connection pool objects.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
@@ -111318,86 +119502,109 @@ async def list_users(self, database_cluster_uuid: str, **kwargs: Any) -> JSON:
# response body for status code(s): 200
response == {
- "users": [
+ "pools": [
{
- "name": "str", # The name of a database user. Required.
- "access_cert": "str", # Optional. Access certificate for TLS
- client authentication. (Kafka only).
- "access_key": "str", # Optional. Access key for TLS client
- authentication. (Kafka only).
- "mysql_settings": {
- "auth_plugin": "str" # A string specifying the
- authentication method to be used for connections to the MySQL user
- account. The valid values are ``mysql_native_password`` or
- ``caching_sha2_password``. If excluded when creating a new user, the
- default for the version of MySQL in use will be used. As of MySQL
- 8.0, the default is ``caching_sha2_password``. Required. Known values
- are: "mysql_native_password" and "caching_sha2_password".
+ "db": "str", # The database for use with the connection
+ pool. Required.
+ "mode": "str", # The PGBouncer transaction mode for the
+ connection pool. The allowed values are session, transaction, and
+ statement. Required.
+ "name": "str", # A unique name for the connection pool. Must
+ be between 3 and 60 characters. Required.
+ "size": 0, # The desired size of the PGBouncer connection
+ pool. The maximum allowed size is determined by the size of the cluster's
+ primary node. 25 backend server connections are allowed for every 1GB of
+ RAM. Three are reserved for maintenance. For example, a primary node with
+ 1 GB of RAM allows for a maximum of 22 backend server connections while
+ one with 4 GB would allow for 97. Note that these are shared across all
+ connection pools in a cluster. Required.
+ "connection": {
+ "database": "str", # Optional. The name of the
+ default database.
+ "host": "str", # Optional. The FQDN pointing to the
+ database cluster's current primary node.
+ "password": "str", # Optional. The randomly
+ generated password for the default
+ user.:code:`
`:code:`
`Requires ``database:view_credentials``
+ scope.
+ "port": 0, # Optional. The port on which the
+ database cluster is listening.
+ "ssl": bool, # Optional. A boolean value indicating
+ if the connection should be made over SSL.
+ "uri": "str", # Optional. A connection string in the
+ format accepted by the ``psql`` command. This is provided as a
+ convenience and should be able to be constructed by the other
+ attributes.
+ "user": "str" # Optional. The default user for the
+ database.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
},
- "password": "str", # Optional. A randomly generated password
- for the database user.:code:`
`Requires ``database:view_credentials``
- scope.
- "role": "str", # Optional. A string representing the
- database user's role. The value will be either "primary" or "normal".
- Known values are: "primary" and "normal".
- "settings": {
- "acl": [
- {
- "permission": "str", # Permission
- set applied to the ACL. 'consume' allows for messages to be
- consumed from the topic. 'produce' allows for messages to be
- published to the topic. 'produceconsume' allows for both
- 'consume' and 'produce' permission. 'admin' allows for
- 'produceconsume' as well as any operations to administer the
- topic (delete, update). Required. Known values are: "admin",
- "consume", "produce", and "produceconsume".
- "topic": "str", # A regex for
- matching the topic(s) that this ACL should apply to.
- Required.
- "id": "str" # Optional. An
- identifier for the ACL. Will be computed after the ACL is
- created/updated.
- }
- ],
- "mongo_user_settings": {
- "databases": [
- "str" # Optional. A list of
- databases to which the user should have access. When the
- database is set to ``admin``"" , the user will have access to
- all databases based on the user's role i.e. a user with the
- role ``readOnly`` assigned to the ``admin`` database will
- have read access to all databases.
- ],
- "role": "str" # Optional. The role to assign
- to the user with each role mapping to a MongoDB built-in role.
- ``readOnly`` maps to a `read
- `_
- role. ``readWrite`` maps to a `readWrite
- `_
- role. ``dbAdmin`` maps to a `dbAdmin
- `_
- role. Known values are: "readOnly", "readWrite", and "dbAdmin".
- },
- "opensearch_acl": [
- {
- "index": "str", # Optional. A regex
- for matching the indexes that this ACL should apply to.
- "permission": "str" # Optional.
- Permission set applied to the ACL. 'read' allows user to read
- from the index. 'write' allows for user to write to the
- index. 'readwrite' allows for both 'read' and 'write'
- permission. 'deny'(default) restricts user from performing
- any operation over an index. 'admin' allows for 'readwrite'
- as well as any operations to administer the index. Known
- values are: "deny", "admin", "read", "readwrite", and
- "write".
- }
- ],
- "pg_allow_replication": bool # Optional. For
- Postgres clusters, set to ``true`` for a user with replication
- rights. This option is not currently supported for other database
- engines.
- }
+ "private_connection": {
+ "database": "str", # Optional. The name of the
+ default database.
+ "host": "str", # Optional. The FQDN pointing to the
+ database cluster's current primary node.
+ "password": "str", # Optional. The randomly
+ generated password for the default
+ user.:code:`
`:code:`
`Requires ``database:view_credentials``
+ scope.
+ "port": 0, # Optional. The port on which the
+ database cluster is listening.
+ "ssl": bool, # Optional. A boolean value indicating
+ if the connection should be made over SSL.
+ "uri": "str", # Optional. A connection string in the
+ format accepted by the ``psql`` command. This is provided as a
+ convenience and should be able to be constructed by the other
+ attributes.
+ "user": "str" # Optional. The default user for the
+ database.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ },
+ "standby_connection": {
+ "database": "str", # Optional. The name of the
+ default database.
+ "host": "str", # Optional. The FQDN pointing to the
+ database cluster's current primary node.
+ "password": "str", # Optional. The randomly
+ generated password for the default
+ user.:code:`
`:code:`
`Requires ``database:view_credentials``
+ scope.
+ "port": 0, # Optional. The port on which the
+ database cluster is listening.
+ "ssl": bool, # Optional. A boolean value indicating
+ if the connection should be made over SSL.
+ "uri": "str", # Optional. A connection string in the
+ format accepted by the ``psql`` command. This is provided as a
+ convenience and should be able to be constructed by the other
+ attributes.
+ "user": "str" # Optional. The default user for the
+ database.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ },
+ "standby_private_connection": {
+ "database": "str", # Optional. The name of the
+ default database.
+ "host": "str", # Optional. The FQDN pointing to the
+ database cluster's current primary node.
+ "password": "str", # Optional. The randomly
+ generated password for the default
+ user.:code:`
`:code:`
`Requires ``database:view_credentials``
+ scope.
+ "port": 0, # Optional. The port on which the
+ database cluster is listening.
+ "ssl": bool, # Optional. A boolean value indicating
+ if the connection should be made over SSL.
+ "uri": "str", # Optional. A connection string in the
+ format accepted by the ``psql`` command. This is provided as a
+ convenience and should be able to be constructed by the other
+ attributes.
+ "user": "str" # Optional. The default user for the
+ database.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ },
+ "user": "str" # Optional. The name of the user for use with
+ the connection pool. When excluded, all sessions connect to the database
+ as the inbound user.
}
]
}
@@ -111431,7 +119638,7 @@ async def list_users(self, database_cluster_uuid: str, **kwargs: Any) -> JSON:
cls: ClsType[JSON] = kwargs.pop("cls", None)
- _request = build_databases_list_users_request(
+ _request = build_databases_list_connection_pools_request(
database_cluster_uuid=database_cluster_uuid,
headers=_headers,
params=_params,
@@ -111492,7 +119699,7 @@ async def list_users(self, database_cluster_uuid: str, **kwargs: Any) -> JSON:
return cast(JSON, deserialized) # type: ignore
@overload
- async def add_user(
+ async def add_connection_pool(
self,
database_cluster_uuid: str,
body: JSON,
@@ -111501,192 +119708,218 @@ async def add_user(
**kwargs: Any
) -> JSON:
# pylint: disable=line-too-long
- """Add a Database User.
-
- To add a new database user, send a POST request to ``/v2/databases/$DATABASE_ID/users``
- with the desired username.
-
- Note: User management is not supported for Caching or Valkey clusters.
-
- When adding a user to a MySQL cluster, additional options can be configured in the
- ``mysql_settings`` object.
-
- When adding a user to a Kafka cluster, additional options can be configured in
- the ``settings`` object.
+ """Add a New Connection Pool (PostgreSQL).
- When adding a user to a MongoDB cluster, additional options can be configured in
- the ``settings.mongo_user_settings`` object.
+ For PostgreSQL database clusters, connection pools can be used to allow a
+ database to share its idle connections. The popular PostgreSQL connection
+ pooling utility PgBouncer is used to provide this service. `See here for more information
+ `_
+ about how and why to use PgBouncer connection pooling including
+ details about the available transaction modes.
- The response will be a JSON object with a key called ``user``. The value of this will be an
- object that contains the standard attributes associated with a database user including
- its randomly generated password.
+ To add a new connection pool to a PostgreSQL database cluster, send a POST
+ request to ``/v2/databases/$DATABASE_ID/pools`` specifying a name for the pool,
+ the user to connect with, the database to connect to, as well as its desired
+ size and transaction mode.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
:param body: Required.
:type body: JSON
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
- Default value is "application/json".
- :paramtype content_type: str
- :return: JSON object
- :rtype: JSON
- :raises ~azure.core.exceptions.HttpResponseError:
-
- Example:
- .. code-block:: python
-
- # JSON input template you can fill out and use as your body input.
- body = {
- "name": "str", # The name of a database user. Required.
- "access_cert": "str", # Optional. Access certificate for TLS client
- authentication. (Kafka only).
- "access_key": "str", # Optional. Access key for TLS client authentication.
- (Kafka only).
- "mysql_settings": {
- "auth_plugin": "str" # A string specifying the authentication method
- to be used for connections to the MySQL user account. The valid values are
- ``mysql_native_password`` or ``caching_sha2_password``. If excluded when
- creating a new user, the default for the version of MySQL in use will be
- used. As of MySQL 8.0, the default is ``caching_sha2_password``. Required.
- Known values are: "mysql_native_password" and "caching_sha2_password".
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: JSON object
+ :rtype: JSON
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # JSON input template you can fill out and use as your body input.
+ body = {
+ "db": "str", # The database for use with the connection pool. Required.
+ "mode": "str", # The PGBouncer transaction mode for the connection pool. The
+ allowed values are session, transaction, and statement. Required.
+ "name": "str", # A unique name for the connection pool. Must be between 3
+ and 60 characters. Required.
+ "size": 0, # The desired size of the PGBouncer connection pool. The maximum
+ allowed size is determined by the size of the cluster's primary node. 25 backend
+ server connections are allowed for every 1GB of RAM. Three are reserved for
+ maintenance. For example, a primary node with 1 GB of RAM allows for a maximum of
+ 22 backend server connections while one with 4 GB would allow for 97. Note that
+ these are shared across all connection pools in a cluster. Required.
+ "connection": {
+ "database": "str", # Optional. The name of the default database.
+ "host": "str", # Optional. The FQDN pointing to the database
+ cluster's current primary node.
+ "password": "str", # Optional. The randomly generated password for
+ the default user.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ "port": 0, # Optional. The port on which the database cluster is
+ listening.
+ "ssl": bool, # Optional. A boolean value indicating if the
+ connection should be made over SSL.
+ "uri": "str", # Optional. A connection string in the format accepted
+ by the ``psql`` command. This is provided as a convenience and should be able
+ to be constructed by the other attributes.
+ "user": "str" # Optional. The default user for the
+ database.:code:`
`:code:`
`Requires ``database:view_credentials``
+ scope.
},
- "password": "str", # Optional. A randomly generated password for the
- database user.:code:`
`Requires ``database:view_credentials`` scope.
- "readonly": bool, # Optional. (To be deprecated: use
- settings.mongo_user_settings.role instead for access controls to MongoDB
- databases). For MongoDB clusters, set to ``true`` to create a read-only user.
- This option is not currently supported for other database engines.
- "role": "str", # Optional. A string representing the database user's role.
- The value will be either "primary" or "normal". Known values are: "primary" and
- "normal".
- "settings": {
- "acl": [
- {
- "permission": "str", # Permission set applied to the
- ACL. 'consume' allows for messages to be consumed from the topic.
- 'produce' allows for messages to be published to the topic.
- 'produceconsume' allows for both 'consume' and 'produce' permission.
- 'admin' allows for 'produceconsume' as well as any operations to
- administer the topic (delete, update). Required. Known values are:
- "admin", "consume", "produce", and "produceconsume".
- "topic": "str", # A regex for matching the topic(s)
- that this ACL should apply to. Required.
- "id": "str" # Optional. An identifier for the ACL.
- Will be computed after the ACL is created/updated.
- }
- ],
- "mongo_user_settings": {
- "databases": [
- "str" # Optional. A list of databases to which the
- user should have access. When the database is set to ``admin``"" ,
- the user will have access to all databases based on the user's role
- i.e. a user with the role ``readOnly`` assigned to the ``admin``
- database will have read access to all databases.
- ],
- "role": "str" # Optional. The role to assign to the user
- with each role mapping to a MongoDB built-in role. ``readOnly`` maps to
- a `read
- `_
- role. ``readWrite`` maps to a `readWrite
- `_
- role. ``dbAdmin`` maps to a `dbAdmin
- `_
- role. Known values are: "readOnly", "readWrite", and "dbAdmin".
- },
- "opensearch_acl": [
- {
- "index": "str", # Optional. A regex for matching the
- indexes that this ACL should apply to.
- "permission": "str" # Optional. Permission set
- applied to the ACL. 'read' allows user to read from the index.
- 'write' allows for user to write to the index. 'readwrite' allows for
- both 'read' and 'write' permission. 'deny'(default) restricts user
- from performing any operation over an index. 'admin' allows for
- 'readwrite' as well as any operations to administer the index. Known
- values are: "deny", "admin", "read", "readwrite", and "write".
- }
- ],
- "pg_allow_replication": bool # Optional. For Postgres clusters, set
- to ``true`` for a user with replication rights. This option is not currently
- supported for other database engines.
- }
+ "private_connection": {
+ "database": "str", # Optional. The name of the default database.
+ "host": "str", # Optional. The FQDN pointing to the database
+ cluster's current primary node.
+ "password": "str", # Optional. The randomly generated password for
+ the default user.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ "port": 0, # Optional. The port on which the database cluster is
+ listening.
+ "ssl": bool, # Optional. A boolean value indicating if the
+ connection should be made over SSL.
+ "uri": "str", # Optional. A connection string in the format accepted
+ by the ``psql`` command. This is provided as a convenience and should be able
+ to be constructed by the other attributes.
+ "user": "str" # Optional. The default user for the
+ database.:code:`
`:code:`
`Requires ``database:view_credentials``
+ scope.
+ },
+ "standby_connection": {
+ "database": "str", # Optional. The name of the default database.
+ "host": "str", # Optional. The FQDN pointing to the database
+ cluster's current primary node.
+ "password": "str", # Optional. The randomly generated password for
+ the default user.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ "port": 0, # Optional. The port on which the database cluster is
+ listening.
+ "ssl": bool, # Optional. A boolean value indicating if the
+ connection should be made over SSL.
+ "uri": "str", # Optional. A connection string in the format accepted
+ by the ``psql`` command. This is provided as a convenience and should be able
+ to be constructed by the other attributes.
+ "user": "str" # Optional. The default user for the
+ database.:code:`
`:code:`
`Requires ``database:view_credentials``
+ scope.
+ },
+ "standby_private_connection": {
+ "database": "str", # Optional. The name of the default database.
+ "host": "str", # Optional. The FQDN pointing to the database
+ cluster's current primary node.
+ "password": "str", # Optional. The randomly generated password for
+ the default user.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ "port": 0, # Optional. The port on which the database cluster is
+ listening.
+ "ssl": bool, # Optional. A boolean value indicating if the
+ connection should be made over SSL.
+ "uri": "str", # Optional. A connection string in the format accepted
+ by the ``psql`` command. This is provided as a convenience and should be able
+ to be constructed by the other attributes.
+ "user": "str" # Optional. The default user for the
+ database.:code:`
`:code:`
`Requires ``database:view_credentials``
+ scope.
+ },
+ "user": "str" # Optional. The name of the user for use with the connection
+ pool. When excluded, all sessions connect to the database as the inbound user.
}
# response body for status code(s): 201
response == {
- "user": {
- "name": "str", # The name of a database user. Required.
- "access_cert": "str", # Optional. Access certificate for TLS client
- authentication. (Kafka only).
- "access_key": "str", # Optional. Access key for TLS client
- authentication. (Kafka only).
- "mysql_settings": {
- "auth_plugin": "str" # A string specifying the
- authentication method to be used for connections to the MySQL user
- account. The valid values are ``mysql_native_password`` or
- ``caching_sha2_password``. If excluded when creating a new user, the
- default for the version of MySQL in use will be used. As of MySQL 8.0,
- the default is ``caching_sha2_password``. Required. Known values are:
- "mysql_native_password" and "caching_sha2_password".
+ "pool": {
+ "db": "str", # The database for use with the connection pool.
+ Required.
+ "mode": "str", # The PGBouncer transaction mode for the connection
+ pool. The allowed values are session, transaction, and statement. Required.
+ "name": "str", # A unique name for the connection pool. Must be
+ between 3 and 60 characters. Required.
+ "size": 0, # The desired size of the PGBouncer connection pool. The
+ maximum allowed size is determined by the size of the cluster's primary node.
+ 25 backend server connections are allowed for every 1GB of RAM. Three are
+ reserved for maintenance. For example, a primary node with 1 GB of RAM allows
+ for a maximum of 22 backend server connections while one with 4 GB would
+ allow for 97. Note that these are shared across all connection pools in a
+ cluster. Required.
+ "connection": {
+ "database": "str", # Optional. The name of the default
+ database.
+ "host": "str", # Optional. The FQDN pointing to the database
+ cluster's current primary node.
+ "password": "str", # Optional. The randomly generated
+ password for the default user.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ "port": 0, # Optional. The port on which the database
+ cluster is listening.
+ "ssl": bool, # Optional. A boolean value indicating if the
+ connection should be made over SSL.
+ "uri": "str", # Optional. A connection string in the format
+ accepted by the ``psql`` command. This is provided as a convenience and
+ should be able to be constructed by the other attributes.
+ "user": "str" # Optional. The default user for the
+ database.:code:`
`:code:`
`Requires ``database:view_credentials``
+ scope.
},
- "password": "str", # Optional. A randomly generated password for the
- database user.:code:`
`Requires ``database:view_credentials`` scope.
- "role": "str", # Optional. A string representing the database user's
- role. The value will be either "primary" or "normal". Known values are:
- "primary" and "normal".
- "settings": {
- "acl": [
- {
- "permission": "str", # Permission set
- applied to the ACL. 'consume' allows for messages to be consumed
- from the topic. 'produce' allows for messages to be published to
- the topic. 'produceconsume' allows for both 'consume' and
- 'produce' permission. 'admin' allows for 'produceconsume' as well
- as any operations to administer the topic (delete, update).
- Required. Known values are: "admin", "consume", "produce", and
- "produceconsume".
- "topic": "str", # A regex for matching the
- topic(s) that this ACL should apply to. Required.
- "id": "str" # Optional. An identifier for
- the ACL. Will be computed after the ACL is created/updated.
- }
- ],
- "mongo_user_settings": {
- "databases": [
- "str" # Optional. A list of databases to
- which the user should have access. When the database is set to
- ``admin``"" , the user will have access to all databases based on
- the user's role i.e. a user with the role ``readOnly`` assigned
- to the ``admin`` database will have read access to all databases.
- ],
- "role": "str" # Optional. The role to assign to the
- user with each role mapping to a MongoDB built-in role. ``readOnly``
- maps to a `read
- `_
- role. ``readWrite`` maps to a `readWrite
- `_
- role. ``dbAdmin`` maps to a `dbAdmin
- `_
- role. Known values are: "readOnly", "readWrite", and "dbAdmin".
- },
- "opensearch_acl": [
- {
- "index": "str", # Optional. A regex for
- matching the indexes that this ACL should apply to.
- "permission": "str" # Optional. Permission
- set applied to the ACL. 'read' allows user to read from the
- index. 'write' allows for user to write to the index. 'readwrite'
- allows for both 'read' and 'write' permission. 'deny'(default)
- restricts user from performing any operation over an index.
- 'admin' allows for 'readwrite' as well as any operations to
- administer the index. Known values are: "deny", "admin", "read",
- "readwrite", and "write".
- }
- ],
- "pg_allow_replication": bool # Optional. For Postgres
- clusters, set to ``true`` for a user with replication rights. This option
- is not currently supported for other database engines.
- }
+ "private_connection": {
+ "database": "str", # Optional. The name of the default
+ database.
+ "host": "str", # Optional. The FQDN pointing to the database
+ cluster's current primary node.
+ "password": "str", # Optional. The randomly generated
+ password for the default user.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ "port": 0, # Optional. The port on which the database
+ cluster is listening.
+ "ssl": bool, # Optional. A boolean value indicating if the
+ connection should be made over SSL.
+ "uri": "str", # Optional. A connection string in the format
+ accepted by the ``psql`` command. This is provided as a convenience and
+ should be able to be constructed by the other attributes.
+ "user": "str" # Optional. The default user for the
+ database.:code:`
`:code:`
`Requires ``database:view_credentials``
+ scope.
+ },
+ "standby_connection": {
+ "database": "str", # Optional. The name of the default
+ database.
+ "host": "str", # Optional. The FQDN pointing to the database
+ cluster's current primary node.
+ "password": "str", # Optional. The randomly generated
+ password for the default user.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ "port": 0, # Optional. The port on which the database
+ cluster is listening.
+ "ssl": bool, # Optional. A boolean value indicating if the
+ connection should be made over SSL.
+ "uri": "str", # Optional. A connection string in the format
+ accepted by the ``psql`` command. This is provided as a convenience and
+ should be able to be constructed by the other attributes.
+ "user": "str" # Optional. The default user for the
+ database.:code:`
`:code:`
`Requires ``database:view_credentials``
+ scope.
+ },
+ "standby_private_connection": {
+ "database": "str", # Optional. The name of the default
+ database.
+ "host": "str", # Optional. The FQDN pointing to the database
+ cluster's current primary node.
+ "password": "str", # Optional. The randomly generated
+ password for the default user.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ "port": 0, # Optional. The port on which the database
+ cluster is listening.
+ "ssl": bool, # Optional. A boolean value indicating if the
+ connection should be made over SSL.
+ "uri": "str", # Optional. A connection string in the format
+ accepted by the ``psql`` command. This is provided as a convenience and
+ should be able to be constructed by the other attributes.
+ "user": "str" # Optional. The default user for the
+ database.:code:`
`:code:`
`Requires ``database:view_credentials``
+ scope.
+ },
+ "user": "str" # Optional. The name of the user for use with the
+ connection pool. When excluded, all sessions connect to the database as the
+ inbound user.
}
}
# response body for status code(s): 404
@@ -111703,7 +119936,7 @@ async def add_user(
"""
@overload
- async def add_user(
+ async def add_connection_pool(
self,
database_cluster_uuid: str,
body: IO[bytes],
@@ -111712,25 +119945,19 @@ async def add_user(
**kwargs: Any
) -> JSON:
# pylint: disable=line-too-long
- """Add a Database User.
-
- To add a new database user, send a POST request to ``/v2/databases/$DATABASE_ID/users``
- with the desired username.
-
- Note: User management is not supported for Caching or Valkey clusters.
-
- When adding a user to a MySQL cluster, additional options can be configured in the
- ``mysql_settings`` object.
-
- When adding a user to a Kafka cluster, additional options can be configured in
- the ``settings`` object.
+ """Add a New Connection Pool (PostgreSQL).
- When adding a user to a MongoDB cluster, additional options can be configured in
- the ``settings.mongo_user_settings`` object.
+ For PostgreSQL database clusters, connection pools can be used to allow a
+ database to share its idle connections. The popular PostgreSQL connection
+ pooling utility PgBouncer is used to provide this service. `See here for more information
+ `_
+ about how and why to use PgBouncer connection pooling including
+ details about the available transaction modes.
- The response will be a JSON object with a key called ``user``. The value of this will be an
- object that contains the standard attributes associated with a database user including
- its randomly generated password.
+ To add a new connection pool to a PostgreSQL database cluster, send a POST
+ request to ``/v2/databases/$DATABASE_ID/pools`` specifying a name for the pool,
+ the user to connect with, the database to connect to, as well as its desired
+ size and transaction mode.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
@@ -111748,79 +119975,99 @@ async def add_user(
# response body for status code(s): 201
response == {
- "user": {
- "name": "str", # The name of a database user. Required.
- "access_cert": "str", # Optional. Access certificate for TLS client
- authentication. (Kafka only).
- "access_key": "str", # Optional. Access key for TLS client
- authentication. (Kafka only).
- "mysql_settings": {
- "auth_plugin": "str" # A string specifying the
- authentication method to be used for connections to the MySQL user
- account. The valid values are ``mysql_native_password`` or
- ``caching_sha2_password``. If excluded when creating a new user, the
- default for the version of MySQL in use will be used. As of MySQL 8.0,
- the default is ``caching_sha2_password``. Required. Known values are:
- "mysql_native_password" and "caching_sha2_password".
+ "pool": {
+ "db": "str", # The database for use with the connection pool.
+ Required.
+ "mode": "str", # The PGBouncer transaction mode for the connection
+ pool. The allowed values are session, transaction, and statement. Required.
+ "name": "str", # A unique name for the connection pool. Must be
+ between 3 and 60 characters. Required.
+ "size": 0, # The desired size of the PGBouncer connection pool. The
+ maximum allowed size is determined by the size of the cluster's primary node.
+ 25 backend server connections are allowed for every 1GB of RAM. Three are
+ reserved for maintenance. For example, a primary node with 1 GB of RAM allows
+ for a maximum of 22 backend server connections while one with 4 GB would
+ allow for 97. Note that these are shared across all connection pools in a
+ cluster. Required.
+ "connection": {
+ "database": "str", # Optional. The name of the default
+ database.
+ "host": "str", # Optional. The FQDN pointing to the database
+ cluster's current primary node.
+ "password": "str", # Optional. The randomly generated
+ password for the default user.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ "port": 0, # Optional. The port on which the database
+ cluster is listening.
+ "ssl": bool, # Optional. A boolean value indicating if the
+ connection should be made over SSL.
+ "uri": "str", # Optional. A connection string in the format
+ accepted by the ``psql`` command. This is provided as a convenience and
+ should be able to be constructed by the other attributes.
+ "user": "str" # Optional. The default user for the
+ database.:code:`
`:code:`
`Requires ``database:view_credentials``
+ scope.
},
- "password": "str", # Optional. A randomly generated password for the
- database user.:code:`
`Requires ``database:view_credentials`` scope.
- "role": "str", # Optional. A string representing the database user's
- role. The value will be either "primary" or "normal". Known values are:
- "primary" and "normal".
- "settings": {
- "acl": [
- {
- "permission": "str", # Permission set
- applied to the ACL. 'consume' allows for messages to be consumed
- from the topic. 'produce' allows for messages to be published to
- the topic. 'produceconsume' allows for both 'consume' and
- 'produce' permission. 'admin' allows for 'produceconsume' as well
- as any operations to administer the topic (delete, update).
- Required. Known values are: "admin", "consume", "produce", and
- "produceconsume".
- "topic": "str", # A regex for matching the
- topic(s) that this ACL should apply to. Required.
- "id": "str" # Optional. An identifier for
- the ACL. Will be computed after the ACL is created/updated.
- }
- ],
- "mongo_user_settings": {
- "databases": [
- "str" # Optional. A list of databases to
- which the user should have access. When the database is set to
- ``admin``"" , the user will have access to all databases based on
- the user's role i.e. a user with the role ``readOnly`` assigned
- to the ``admin`` database will have read access to all databases.
- ],
- "role": "str" # Optional. The role to assign to the
- user with each role mapping to a MongoDB built-in role. ``readOnly``
- maps to a `read
- `_
- role. ``readWrite`` maps to a `readWrite
- `_
- role. ``dbAdmin`` maps to a `dbAdmin
- `_
- role. Known values are: "readOnly", "readWrite", and "dbAdmin".
- },
- "opensearch_acl": [
- {
- "index": "str", # Optional. A regex for
- matching the indexes that this ACL should apply to.
- "permission": "str" # Optional. Permission
- set applied to the ACL. 'read' allows user to read from the
- index. 'write' allows for user to write to the index. 'readwrite'
- allows for both 'read' and 'write' permission. 'deny'(default)
- restricts user from performing any operation over an index.
- 'admin' allows for 'readwrite' as well as any operations to
- administer the index. Known values are: "deny", "admin", "read",
- "readwrite", and "write".
- }
- ],
- "pg_allow_replication": bool # Optional. For Postgres
- clusters, set to ``true`` for a user with replication rights. This option
- is not currently supported for other database engines.
- }
+ "private_connection": {
+ "database": "str", # Optional. The name of the default
+ database.
+ "host": "str", # Optional. The FQDN pointing to the database
+ cluster's current primary node.
+ "password": "str", # Optional. The randomly generated
+ password for the default user.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ "port": 0, # Optional. The port on which the database
+ cluster is listening.
+ "ssl": bool, # Optional. A boolean value indicating if the
+ connection should be made over SSL.
+ "uri": "str", # Optional. A connection string in the format
+ accepted by the ``psql`` command. This is provided as a convenience and
+ should be able to be constructed by the other attributes.
+ "user": "str" # Optional. The default user for the
+ database.:code:`
`:code:`
`Requires ``database:view_credentials``
+ scope.
+ },
+ "standby_connection": {
+ "database": "str", # Optional. The name of the default
+ database.
+ "host": "str", # Optional. The FQDN pointing to the database
+ cluster's current primary node.
+ "password": "str", # Optional. The randomly generated
+ password for the default user.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ "port": 0, # Optional. The port on which the database
+ cluster is listening.
+ "ssl": bool, # Optional. A boolean value indicating if the
+ connection should be made over SSL.
+ "uri": "str", # Optional. A connection string in the format
+ accepted by the ``psql`` command. This is provided as a convenience and
+ should be able to be constructed by the other attributes.
+ "user": "str" # Optional. The default user for the
+ database.:code:`
`:code:`
`Requires ``database:view_credentials``
+ scope.
+ },
+ "standby_private_connection": {
+ "database": "str", # Optional. The name of the default
+ database.
+ "host": "str", # Optional. The FQDN pointing to the database
+ cluster's current primary node.
+ "password": "str", # Optional. The randomly generated
+ password for the default user.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ "port": 0, # Optional. The port on which the database
+ cluster is listening.
+ "ssl": bool, # Optional. A boolean value indicating if the
+ connection should be made over SSL.
+ "uri": "str", # Optional. A connection string in the format
+ accepted by the ``psql`` command. This is provided as a convenience and
+ should be able to be constructed by the other attributes.
+ "user": "str" # Optional. The default user for the
+ database.:code:`
`:code:`
`Requires ``database:view_credentials``
+ scope.
+ },
+ "user": "str" # Optional. The name of the user for use with the
+ connection pool. When excluded, all sessions connect to the database as the
+ inbound user.
}
}
# response body for status code(s): 404
@@ -111837,29 +120084,23 @@ async def add_user(
"""
@distributed_trace_async
- async def add_user(
+ async def add_connection_pool(
self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any
) -> JSON:
# pylint: disable=line-too-long
- """Add a Database User.
-
- To add a new database user, send a POST request to ``/v2/databases/$DATABASE_ID/users``
- with the desired username.
-
- Note: User management is not supported for Caching or Valkey clusters.
-
- When adding a user to a MySQL cluster, additional options can be configured in the
- ``mysql_settings`` object.
-
- When adding a user to a Kafka cluster, additional options can be configured in
- the ``settings`` object.
+ """Add a New Connection Pool (PostgreSQL).
- When adding a user to a MongoDB cluster, additional options can be configured in
- the ``settings.mongo_user_settings`` object.
+ For PostgreSQL database clusters, connection pools can be used to allow a
+ database to share its idle connections. The popular PostgreSQL connection
+ pooling utility PgBouncer is used to provide this service. `See here for more information
+ `_
+ about how and why to use PgBouncer connection pooling including
+ details about the available transaction modes.
- The response will be a JSON object with a key called ``user``. The value of this will be an
- object that contains the standard attributes associated with a database user including
- its randomly generated password.
+ To add a new connection pool to a PostgreSQL database cluster, send a POST
+ request to ``/v2/databases/$DATABASE_ID/pools`` specifying a name for the pool,
+ the user to connect with, the database to connect to, as well as its desired
+ size and transaction mode.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
@@ -111874,156 +120115,188 @@ async def add_user(
# JSON input template you can fill out and use as your body input.
body = {
- "name": "str", # The name of a database user. Required.
- "access_cert": "str", # Optional. Access certificate for TLS client
- authentication. (Kafka only).
- "access_key": "str", # Optional. Access key for TLS client authentication.
- (Kafka only).
- "mysql_settings": {
- "auth_plugin": "str" # A string specifying the authentication method
- to be used for connections to the MySQL user account. The valid values are
- ``mysql_native_password`` or ``caching_sha2_password``. If excluded when
- creating a new user, the default for the version of MySQL in use will be
- used. As of MySQL 8.0, the default is ``caching_sha2_password``. Required.
- Known values are: "mysql_native_password" and "caching_sha2_password".
+ "db": "str", # The database for use with the connection pool. Required.
+ "mode": "str", # The PGBouncer transaction mode for the connection pool. The
+ allowed values are session, transaction, and statement. Required.
+ "name": "str", # A unique name for the connection pool. Must be between 3
+ and 60 characters. Required.
+ "size": 0, # The desired size of the PGBouncer connection pool. The maximum
+ allowed size is determined by the size of the cluster's primary node. 25 backend
+ server connections are allowed for every 1GB of RAM. Three are reserved for
+ maintenance. For example, a primary node with 1 GB of RAM allows for a maximum of
+ 22 backend server connections while one with 4 GB would allow for 97. Note that
+ these are shared across all connection pools in a cluster. Required.
+ "connection": {
+ "database": "str", # Optional. The name of the default database.
+ "host": "str", # Optional. The FQDN pointing to the database
+ cluster's current primary node.
+ "password": "str", # Optional. The randomly generated password for
+ the default user.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ "port": 0, # Optional. The port on which the database cluster is
+ listening.
+ "ssl": bool, # Optional. A boolean value indicating if the
+ connection should be made over SSL.
+ "uri": "str", # Optional. A connection string in the format accepted
+ by the ``psql`` command. This is provided as a convenience and should be able
+ to be constructed by the other attributes.
+ "user": "str" # Optional. The default user for the
+ database.:code:`
`:code:`
`Requires ``database:view_credentials``
+ scope.
},
- "password": "str", # Optional. A randomly generated password for the
- database user.:code:`
`Requires ``database:view_credentials`` scope.
- "readonly": bool, # Optional. (To be deprecated: use
- settings.mongo_user_settings.role instead for access controls to MongoDB
- databases). For MongoDB clusters, set to ``true`` to create a read-only user.
- This option is not currently supported for other database engines.
- "role": "str", # Optional. A string representing the database user's role.
- The value will be either "primary" or "normal". Known values are: "primary" and
- "normal".
- "settings": {
- "acl": [
- {
- "permission": "str", # Permission set applied to the
- ACL. 'consume' allows for messages to be consumed from the topic.
- 'produce' allows for messages to be published to the topic.
- 'produceconsume' allows for both 'consume' and 'produce' permission.
- 'admin' allows for 'produceconsume' as well as any operations to
- administer the topic (delete, update). Required. Known values are:
- "admin", "consume", "produce", and "produceconsume".
- "topic": "str", # A regex for matching the topic(s)
- that this ACL should apply to. Required.
- "id": "str" # Optional. An identifier for the ACL.
- Will be computed after the ACL is created/updated.
- }
- ],
- "mongo_user_settings": {
- "databases": [
- "str" # Optional. A list of databases to which the
- user should have access. When the database is set to ``admin``"" ,
- the user will have access to all databases based on the user's role
- i.e. a user with the role ``readOnly`` assigned to the ``admin``
- database will have read access to all databases.
- ],
- "role": "str" # Optional. The role to assign to the user
- with each role mapping to a MongoDB built-in role. ``readOnly`` maps to
- a `read
- `_
- role. ``readWrite`` maps to a `readWrite
- `_
- role. ``dbAdmin`` maps to a `dbAdmin
- `_
- role. Known values are: "readOnly", "readWrite", and "dbAdmin".
- },
- "opensearch_acl": [
- {
- "index": "str", # Optional. A regex for matching the
- indexes that this ACL should apply to.
- "permission": "str" # Optional. Permission set
- applied to the ACL. 'read' allows user to read from the index.
- 'write' allows for user to write to the index. 'readwrite' allows for
- both 'read' and 'write' permission. 'deny'(default) restricts user
- from performing any operation over an index. 'admin' allows for
- 'readwrite' as well as any operations to administer the index. Known
- values are: "deny", "admin", "read", "readwrite", and "write".
- }
- ],
- "pg_allow_replication": bool # Optional. For Postgres clusters, set
- to ``true`` for a user with replication rights. This option is not currently
- supported for other database engines.
- }
+ "private_connection": {
+ "database": "str", # Optional. The name of the default database.
+ "host": "str", # Optional. The FQDN pointing to the database
+ cluster's current primary node.
+ "password": "str", # Optional. The randomly generated password for
+ the default user.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ "port": 0, # Optional. The port on which the database cluster is
+ listening.
+ "ssl": bool, # Optional. A boolean value indicating if the
+ connection should be made over SSL.
+ "uri": "str", # Optional. A connection string in the format accepted
+ by the ``psql`` command. This is provided as a convenience and should be able
+ to be constructed by the other attributes.
+ "user": "str" # Optional. The default user for the
+ database.:code:`
`:code:`
`Requires ``database:view_credentials``
+ scope.
+ },
+ "standby_connection": {
+ "database": "str", # Optional. The name of the default database.
+ "host": "str", # Optional. The FQDN pointing to the database
+ cluster's current primary node.
+ "password": "str", # Optional. The randomly generated password for
+ the default user.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ "port": 0, # Optional. The port on which the database cluster is
+ listening.
+ "ssl": bool, # Optional. A boolean value indicating if the
+ connection should be made over SSL.
+ "uri": "str", # Optional. A connection string in the format accepted
+ by the ``psql`` command. This is provided as a convenience and should be able
+ to be constructed by the other attributes.
+ "user": "str" # Optional. The default user for the
+ database.:code:`
`:code:`
`Requires ``database:view_credentials``
+ scope.
+ },
+ "standby_private_connection": {
+ "database": "str", # Optional. The name of the default database.
+ "host": "str", # Optional. The FQDN pointing to the database
+ cluster's current primary node.
+ "password": "str", # Optional. The randomly generated password for
+ the default user.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ "port": 0, # Optional. The port on which the database cluster is
+ listening.
+ "ssl": bool, # Optional. A boolean value indicating if the
+ connection should be made over SSL.
+ "uri": "str", # Optional. A connection string in the format accepted
+ by the ``psql`` command. This is provided as a convenience and should be able
+ to be constructed by the other attributes.
+ "user": "str" # Optional. The default user for the
+ database.:code:`
`:code:`
`Requires ``database:view_credentials``
+ scope.
+ },
+ "user": "str" # Optional. The name of the user for use with the connection
+ pool. When excluded, all sessions connect to the database as the inbound user.
}
# response body for status code(s): 201
response == {
- "user": {
- "name": "str", # The name of a database user. Required.
- "access_cert": "str", # Optional. Access certificate for TLS client
- authentication. (Kafka only).
- "access_key": "str", # Optional. Access key for TLS client
- authentication. (Kafka only).
- "mysql_settings": {
- "auth_plugin": "str" # A string specifying the
- authentication method to be used for connections to the MySQL user
- account. The valid values are ``mysql_native_password`` or
- ``caching_sha2_password``. If excluded when creating a new user, the
- default for the version of MySQL in use will be used. As of MySQL 8.0,
- the default is ``caching_sha2_password``. Required. Known values are:
- "mysql_native_password" and "caching_sha2_password".
+ "pool": {
+ "db": "str", # The database for use with the connection pool.
+ Required.
+ "mode": "str", # The PGBouncer transaction mode for the connection
+ pool. The allowed values are session, transaction, and statement. Required.
+ "name": "str", # A unique name for the connection pool. Must be
+ between 3 and 60 characters. Required.
+ "size": 0, # The desired size of the PGBouncer connection pool. The
+ maximum allowed size is determined by the size of the cluster's primary node.
+ 25 backend server connections are allowed for every 1GB of RAM. Three are
+ reserved for maintenance. For example, a primary node with 1 GB of RAM allows
+ for a maximum of 22 backend server connections while one with 4 GB would
+ allow for 97. Note that these are shared across all connection pools in a
+ cluster. Required.
+ "connection": {
+ "database": "str", # Optional. The name of the default
+ database.
+ "host": "str", # Optional. The FQDN pointing to the database
+ cluster's current primary node.
+ "password": "str", # Optional. The randomly generated
+ password for the default user.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ "port": 0, # Optional. The port on which the database
+ cluster is listening.
+ "ssl": bool, # Optional. A boolean value indicating if the
+ connection should be made over SSL.
+ "uri": "str", # Optional. A connection string in the format
+ accepted by the ``psql`` command. This is provided as a convenience and
+ should be able to be constructed by the other attributes.
+ "user": "str" # Optional. The default user for the
+ database.:code:`
`:code:`
`Requires ``database:view_credentials``
+ scope.
},
- "password": "str", # Optional. A randomly generated password for the
- database user.:code:`
`Requires ``database:view_credentials`` scope.
- "role": "str", # Optional. A string representing the database user's
- role. The value will be either "primary" or "normal". Known values are:
- "primary" and "normal".
- "settings": {
- "acl": [
- {
- "permission": "str", # Permission set
- applied to the ACL. 'consume' allows for messages to be consumed
- from the topic. 'produce' allows for messages to be published to
- the topic. 'produceconsume' allows for both 'consume' and
- 'produce' permission. 'admin' allows for 'produceconsume' as well
- as any operations to administer the topic (delete, update).
- Required. Known values are: "admin", "consume", "produce", and
- "produceconsume".
- "topic": "str", # A regex for matching the
- topic(s) that this ACL should apply to. Required.
- "id": "str" # Optional. An identifier for
- the ACL. Will be computed after the ACL is created/updated.
- }
- ],
- "mongo_user_settings": {
- "databases": [
- "str" # Optional. A list of databases to
- which the user should have access. When the database is set to
- ``admin``"" , the user will have access to all databases based on
- the user's role i.e. a user with the role ``readOnly`` assigned
- to the ``admin`` database will have read access to all databases.
- ],
- "role": "str" # Optional. The role to assign to the
- user with each role mapping to a MongoDB built-in role. ``readOnly``
- maps to a `read
- `_
- role. ``readWrite`` maps to a `readWrite
- `_
- role. ``dbAdmin`` maps to a `dbAdmin
- `_
- role. Known values are: "readOnly", "readWrite", and "dbAdmin".
- },
- "opensearch_acl": [
- {
- "index": "str", # Optional. A regex for
- matching the indexes that this ACL should apply to.
- "permission": "str" # Optional. Permission
- set applied to the ACL. 'read' allows user to read from the
- index. 'write' allows for user to write to the index. 'readwrite'
- allows for both 'read' and 'write' permission. 'deny'(default)
- restricts user from performing any operation over an index.
- 'admin' allows for 'readwrite' as well as any operations to
- administer the index. Known values are: "deny", "admin", "read",
- "readwrite", and "write".
- }
- ],
- "pg_allow_replication": bool # Optional. For Postgres
- clusters, set to ``true`` for a user with replication rights. This option
- is not currently supported for other database engines.
- }
+ "private_connection": {
+ "database": "str", # Optional. The name of the default
+ database.
+ "host": "str", # Optional. The FQDN pointing to the database
+ cluster's current primary node.
+ "password": "str", # Optional. The randomly generated
+ password for the default user.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ "port": 0, # Optional. The port on which the database
+ cluster is listening.
+ "ssl": bool, # Optional. A boolean value indicating if the
+ connection should be made over SSL.
+ "uri": "str", # Optional. A connection string in the format
+ accepted by the ``psql`` command. This is provided as a convenience and
+ should be able to be constructed by the other attributes.
+ "user": "str" # Optional. The default user for the
+ database.:code:`
`:code:`
`Requires ``database:view_credentials``
+ scope.
+ },
+ "standby_connection": {
+ "database": "str", # Optional. The name of the default
+ database.
+ "host": "str", # Optional. The FQDN pointing to the database
+ cluster's current primary node.
+ "password": "str", # Optional. The randomly generated
+ password for the default user.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ "port": 0, # Optional. The port on which the database
+ cluster is listening.
+ "ssl": bool, # Optional. A boolean value indicating if the
+ connection should be made over SSL.
+ "uri": "str", # Optional. A connection string in the format
+ accepted by the ``psql`` command. This is provided as a convenience and
+ should be able to be constructed by the other attributes.
+ "user": "str" # Optional. The default user for the
+ database.:code:`
`:code:`
`Requires ``database:view_credentials``
+ scope.
+ },
+ "standby_private_connection": {
+ "database": "str", # Optional. The name of the default
+ database.
+ "host": "str", # Optional. The FQDN pointing to the database
+ cluster's current primary node.
+ "password": "str", # Optional. The randomly generated
+ password for the default user.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ "port": 0, # Optional. The port on which the database
+ cluster is listening.
+ "ssl": bool, # Optional. A boolean value indicating if the
+ connection should be made over SSL.
+ "uri": "str", # Optional. A connection string in the format
+ accepted by the ``psql`` command. This is provided as a convenience and
+ should be able to be constructed by the other attributes.
+ "user": "str" # Optional. The default user for the
+ database.:code:`
`:code:`
`Requires ``database:view_credentials``
+ scope.
+ },
+ "user": "str" # Optional. The name of the user for use with the
+ connection pool. When excluded, all sessions connect to the database as the
+ inbound user.
}
}
# response body for status code(s): 404
@@ -112067,7 +120340,7 @@ async def add_user(
else:
_json = body
- _request = build_databases_add_user_request(
+ _request = build_databases_add_connection_pool_request(
database_cluster_uuid=database_cluster_uuid,
content_type=content_type,
json=_json,
@@ -112131,33 +120404,20 @@ async def add_user(
return cast(JSON, deserialized) # type: ignore
@distributed_trace_async
- async def get_user(
- self, database_cluster_uuid: str, username: str, **kwargs: Any
+ async def get_connection_pool(
+ self, database_cluster_uuid: str, pool_name: str, **kwargs: Any
) -> JSON:
# pylint: disable=line-too-long
- """Retrieve an Existing Database User.
-
- To show information about an existing database user, send a GET request to
- ``/v2/databases/$DATABASE_ID/users/$USERNAME``.
-
- Note: User management is not supported for Caching or Valkey clusters.
-
- The response will be a JSON object with a ``user`` key. This will be set to an object
- containing the standard database user attributes. The user's password will not show
- up unless the ``database:view_credentials`` scope is present.
-
- For MySQL clusters, additional options will be contained in the ``mysql_settings``
- object.
-
- For Kafka clusters, additional options will be contained in the ``settings`` object.
+ """Retrieve Existing Connection Pool (PostgreSQL).
- For MongoDB clusters, additional information will be contained in the mongo_user_settings
- object.
+ To show information about an existing connection pool for a PostgreSQL database cluster, send a
+ GET request to ``/v2/databases/$DATABASE_ID/pools/$POOL_NAME``.
+ The response will be a JSON object with a ``pool`` key.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
- :param username: The name of the database user. Required.
- :type username: str
+ :param pool_name: The name used to identify the connection pool. Required.
+ :type pool_name: str
:return: JSON object
:rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
@@ -112167,79 +120427,99 @@ async def get_user(
# response body for status code(s): 200
response == {
- "user": {
- "name": "str", # The name of a database user. Required.
- "access_cert": "str", # Optional. Access certificate for TLS client
- authentication. (Kafka only).
- "access_key": "str", # Optional. Access key for TLS client
- authentication. (Kafka only).
- "mysql_settings": {
- "auth_plugin": "str" # A string specifying the
- authentication method to be used for connections to the MySQL user
- account. The valid values are ``mysql_native_password`` or
- ``caching_sha2_password``. If excluded when creating a new user, the
- default for the version of MySQL in use will be used. As of MySQL 8.0,
- the default is ``caching_sha2_password``. Required. Known values are:
- "mysql_native_password" and "caching_sha2_password".
+ "pool": {
+ "db": "str", # The database for use with the connection pool.
+ Required.
+ "mode": "str", # The PGBouncer transaction mode for the connection
+ pool. The allowed values are session, transaction, and statement. Required.
+ "name": "str", # A unique name for the connection pool. Must be
+ between 3 and 60 characters. Required.
+ "size": 0, # The desired size of the PGBouncer connection pool. The
+ maximum allowed size is determined by the size of the cluster's primary node.
+ 25 backend server connections are allowed for every 1GB of RAM. Three are
+ reserved for maintenance. For example, a primary node with 1 GB of RAM allows
+ for a maximum of 22 backend server connections while one with 4 GB would
+ allow for 97. Note that these are shared across all connection pools in a
+ cluster. Required.
+ "connection": {
+ "database": "str", # Optional. The name of the default
+ database.
+ "host": "str", # Optional. The FQDN pointing to the database
+ cluster's current primary node.
+ "password": "str", # Optional. The randomly generated
+ password for the default user.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ "port": 0, # Optional. The port on which the database
+ cluster is listening.
+ "ssl": bool, # Optional. A boolean value indicating if the
+ connection should be made over SSL.
+ "uri": "str", # Optional. A connection string in the format
+ accepted by the ``psql`` command. This is provided as a convenience and
+ should be able to be constructed by the other attributes.
+ "user": "str" # Optional. The default user for the
+ database.:code:`
`:code:`
`Requires ``database:view_credentials``
+ scope.
},
- "password": "str", # Optional. A randomly generated password for the
- database user.:code:`
`Requires ``database:view_credentials`` scope.
- "role": "str", # Optional. A string representing the database user's
- role. The value will be either "primary" or "normal". Known values are:
- "primary" and "normal".
- "settings": {
- "acl": [
- {
- "permission": "str", # Permission set
- applied to the ACL. 'consume' allows for messages to be consumed
- from the topic. 'produce' allows for messages to be published to
- the topic. 'produceconsume' allows for both 'consume' and
- 'produce' permission. 'admin' allows for 'produceconsume' as well
- as any operations to administer the topic (delete, update).
- Required. Known values are: "admin", "consume", "produce", and
- "produceconsume".
- "topic": "str", # A regex for matching the
- topic(s) that this ACL should apply to. Required.
- "id": "str" # Optional. An identifier for
- the ACL. Will be computed after the ACL is created/updated.
- }
- ],
- "mongo_user_settings": {
- "databases": [
- "str" # Optional. A list of databases to
- which the user should have access. When the database is set to
- ``admin``"" , the user will have access to all databases based on
- the user's role i.e. a user with the role ``readOnly`` assigned
- to the ``admin`` database will have read access to all databases.
- ],
- "role": "str" # Optional. The role to assign to the
- user with each role mapping to a MongoDB built-in role. ``readOnly``
- maps to a `read
- `_
- role. ``readWrite`` maps to a `readWrite
- `_
- role. ``dbAdmin`` maps to a `dbAdmin
- `_
- role. Known values are: "readOnly", "readWrite", and "dbAdmin".
- },
- "opensearch_acl": [
- {
- "index": "str", # Optional. A regex for
- matching the indexes that this ACL should apply to.
- "permission": "str" # Optional. Permission
- set applied to the ACL. 'read' allows user to read from the
- index. 'write' allows for user to write to the index. 'readwrite'
- allows for both 'read' and 'write' permission. 'deny'(default)
- restricts user from performing any operation over an index.
- 'admin' allows for 'readwrite' as well as any operations to
- administer the index. Known values are: "deny", "admin", "read",
- "readwrite", and "write".
- }
- ],
- "pg_allow_replication": bool # Optional. For Postgres
- clusters, set to ``true`` for a user with replication rights. This option
- is not currently supported for other database engines.
- }
+ "private_connection": {
+ "database": "str", # Optional. The name of the default
+ database.
+ "host": "str", # Optional. The FQDN pointing to the database
+ cluster's current primary node.
+ "password": "str", # Optional. The randomly generated
+ password for the default user.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ "port": 0, # Optional. The port on which the database
+ cluster is listening.
+ "ssl": bool, # Optional. A boolean value indicating if the
+ connection should be made over SSL.
+ "uri": "str", # Optional. A connection string in the format
+ accepted by the ``psql`` command. This is provided as a convenience and
+ should be able to be constructed by the other attributes.
+ "user": "str" # Optional. The default user for the
+ database.:code:`
`:code:`
`Requires ``database:view_credentials``
+ scope.
+ },
+ "standby_connection": {
+ "database": "str", # Optional. The name of the default
+ database.
+ "host": "str", # Optional. The FQDN pointing to the database
+ cluster's current primary node.
+ "password": "str", # Optional. The randomly generated
+ password for the default user.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ "port": 0, # Optional. The port on which the database
+ cluster is listening.
+ "ssl": bool, # Optional. A boolean value indicating if the
+ connection should be made over SSL.
+ "uri": "str", # Optional. A connection string in the format
+ accepted by the ``psql`` command. This is provided as a convenience and
+ should be able to be constructed by the other attributes.
+ "user": "str" # Optional. The default user for the
+ database.:code:`
`:code:`
`Requires ``database:view_credentials``
+ scope.
+ },
+ "standby_private_connection": {
+ "database": "str", # Optional. The name of the default
+ database.
+ "host": "str", # Optional. The FQDN pointing to the database
+ cluster's current primary node.
+ "password": "str", # Optional. The randomly generated
+ password for the default user.:code:`
`:code:`
`Requires
+ ``database:view_credentials`` scope.
+ "port": 0, # Optional. The port on which the database
+ cluster is listening.
+ "ssl": bool, # Optional. A boolean value indicating if the
+ connection should be made over SSL.
+ "uri": "str", # Optional. A connection string in the format
+ accepted by the ``psql`` command. This is provided as a convenience and
+ should be able to be constructed by the other attributes.
+ "user": "str" # Optional. The default user for the
+ database.:code:`
`:code:`
`Requires ``database:view_credentials``
+ scope.
+ },
+ "user": "str" # Optional. The name of the user for use with the
+ connection pool. When excluded, all sessions connect to the database as the
+ inbound user.
}
}
# response body for status code(s): 404
@@ -112272,9 +120552,9 @@ async def get_user(
cls: ClsType[JSON] = kwargs.pop("cls", None)
- _request = build_databases_get_user_request(
+ _request = build_databases_get_connection_pool_request(
database_cluster_uuid=database_cluster_uuid,
- username=username,
+ pool_name=pool_name,
headers=_headers,
params=_params,
)
@@ -112333,25 +120613,131 @@ async def get_user(
return cast(JSON, deserialized) # type: ignore
- @distributed_trace_async
- async def delete_user(
- self, database_cluster_uuid: str, username: str, **kwargs: Any
+ @overload
+ async def update_connection_pool(
+ self,
+ database_cluster_uuid: str,
+ pool_name: str,
+ body: JSON,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
) -> Optional[JSON]:
# pylint: disable=line-too-long
- """Remove a Database User.
+ """Update Connection Pools (PostgreSQL).
- To remove a specific database user, send a DELETE request to
- ``/v2/databases/$DATABASE_ID/users/$USERNAME``.
+ To update a connection pool for a PostgreSQL database cluster, send a PUT request to
+ ``/v2/databases/$DATABASE_ID/pools/$POOL_NAME``.
- A status of 204 will be given. This indicates that the request was processed
- successfully, but that no response body is needed.
+ :param database_cluster_uuid: A unique identifier for a database cluster. Required.
+ :type database_cluster_uuid: str
+ :param pool_name: The name used to identify the connection pool. Required.
+ :type pool_name: str
+ :param body: Required.
+ :type body: JSON
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: JSON object or None
+ :rtype: JSON or None
+ :raises ~azure.core.exceptions.HttpResponseError:
- Note: User management is not supported for Caching or Valkey clusters.
+ Example:
+ .. code-block:: python
+
+ # JSON input template you can fill out and use as your body input.
+ body = {
+ "db": "str", # The database for use with the connection pool. Required.
+ "mode": "str", # The PGBouncer transaction mode for the connection pool. The
+ allowed values are session, transaction, and statement. Required.
+ "size": 0, # The desired size of the PGBouncer connection pool. The maximum
+ allowed size is determined by the size of the cluster's primary node. 25 backend
+ server connections are allowed for every 1GB of RAM. Three are reserved for
+ maintenance. For example, a primary node with 1 GB of RAM allows for a maximum of
+ 22 backend server connections while one with 4 GB would allow for 97. Note that
+ these are shared across all connection pools in a cluster. Required.
+ "user": "str" # Optional. The name of the user for use with the connection
+ pool. When excluded, all sessions connect to the database as the inbound user.
+ }
+
+ # response body for status code(s): 404
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
+ """
+
+ @overload
+ async def update_connection_pool(
+ self,
+ database_cluster_uuid: str,
+ pool_name: str,
+ body: IO[bytes],
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> Optional[JSON]:
+ # pylint: disable=line-too-long
+ """Update Connection Pools (PostgreSQL).
+
+ To update a connection pool for a PostgreSQL database cluster, send a PUT request to
+ ``/v2/databases/$DATABASE_ID/pools/$POOL_NAME``.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
- :param username: The name of the database user. Required.
- :type username: str
+ :param pool_name: The name used to identify the connection pool. Required.
+ :type pool_name: str
+ :param body: Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: JSON object or None
+ :rtype: JSON or None
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # response body for status code(s): 404
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
+ """
+
+ @distributed_trace_async
+ async def update_connection_pool(
+ self,
+ database_cluster_uuid: str,
+ pool_name: str,
+ body: Union[JSON, IO[bytes]],
+ **kwargs: Any
+ ) -> Optional[JSON]:
+ # pylint: disable=line-too-long
+ """Update Connection Pools (PostgreSQL).
+
+ To update a connection pool for a PostgreSQL database cluster, send a PUT request to
+ ``/v2/databases/$DATABASE_ID/pools/$POOL_NAME``.
+
+ :param database_cluster_uuid: A unique identifier for a database cluster. Required.
+ :type database_cluster_uuid: str
+ :param pool_name: The name used to identify the connection pool. Required.
+ :type pool_name: str
+ :param body: Is either a JSON type or a IO[bytes] type. Required.
+ :type body: JSON or IO[bytes]
:return: JSON object or None
:rtype: JSON or None
:raises ~azure.core.exceptions.HttpResponseError:
@@ -112359,6 +120745,21 @@ async def delete_user(
Example:
.. code-block:: python
+ # JSON input template you can fill out and use as your body input.
+ body = {
+ "db": "str", # The database for use with the connection pool. Required.
+ "mode": "str", # The PGBouncer transaction mode for the connection pool. The
+ allowed values are session, transaction, and statement. Required.
+ "size": 0, # The desired size of the PGBouncer connection pool. The maximum
+ allowed size is determined by the size of the cluster's primary node. 25 backend
+ server connections are allowed for every 1GB of RAM. Three are reserved for
+ maintenance. For example, a primary node with 1 GB of RAM allows for a maximum of
+ 22 backend server connections while one with 4 GB would allow for 97. Note that
+ these are shared across all connection pools in a cluster. Required.
+ "user": "str" # Optional. The name of the user for use with the connection
+ pool. When excluded, all sessions connect to the database as the inbound user.
+ }
+
# response body for status code(s): 404
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
@@ -112384,14 +120785,28 @@ async def delete_user(
}
error_map.update(kwargs.pop("error_map", {}) or {})
- _headers = kwargs.pop("headers", {}) or {}
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = kwargs.pop("params", {}) or {}
+ content_type: Optional[str] = kwargs.pop(
+ "content_type", _headers.pop("Content-Type", None)
+ )
cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None)
- _request = build_databases_delete_user_request(
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = body
+
+ _request = build_databases_update_connection_pool_request(
database_cluster_uuid=database_cluster_uuid,
- username=username,
+ pool_name=pool_name,
+ content_type=content_type,
+ json=_json,
+ content=_content,
headers=_headers,
params=_params,
)
@@ -112446,180 +120861,30 @@ async def delete_user(
return deserialized # type: ignore
- @overload
- async def update_user(
- self,
- database_cluster_uuid: str,
- username: str,
- body: JSON,
- *,
- content_type: str = "application/json",
- **kwargs: Any
- ) -> JSON:
+ @distributed_trace_async
+ async def delete_connection_pool(
+ self, database_cluster_uuid: str, pool_name: str, **kwargs: Any
+ ) -> Optional[JSON]:
# pylint: disable=line-too-long
- """Update a Database User.
-
- To update an existing database user, send a PUT request to
- ``/v2/databases/$DATABASE_ID/users/$USERNAME``
- with the desired settings.
+ """Delete a Connection Pool (PostgreSQL).
- **Note**\\ : only ``settings`` can be updated via this type of request. If you wish to change
- the name of a user,
- you must recreate a new user.
+ To delete a specific connection pool for a PostgreSQL database cluster, send
+ a DELETE request to ``/v2/databases/$DATABASE_ID/pools/$POOL_NAME``.
- The response will be a JSON object with a key called ``user``. The value of this will be an
- object that contains the name of the update database user, along with the ``settings`` object
- that
- has been updated.
+ A status of 204 will be given. This indicates that the request was processed
+ successfully, but that no response body is needed.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
- :param username: The name of the database user. Required.
- :type username: str
- :param body: Required.
- :type body: JSON
- :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
- Default value is "application/json".
- :paramtype content_type: str
- :return: JSON object
- :rtype: JSON
+ :param pool_name: The name used to identify the connection pool. Required.
+ :type pool_name: str
+ :return: JSON object or None
+ :rtype: JSON or None
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
- # JSON input template you can fill out and use as your body input.
- body = {
- "settings": {
- "acl": [
- {
- "permission": "str", # Permission set applied to the
- ACL. 'consume' allows for messages to be consumed from the topic.
- 'produce' allows for messages to be published to the topic.
- 'produceconsume' allows for both 'consume' and 'produce' permission.
- 'admin' allows for 'produceconsume' as well as any operations to
- administer the topic (delete, update). Required. Known values are:
- "admin", "consume", "produce", and "produceconsume".
- "topic": "str", # A regex for matching the topic(s)
- that this ACL should apply to. Required.
- "id": "str" # Optional. An identifier for the ACL.
- Will be computed after the ACL is created/updated.
- }
- ],
- "mongo_user_settings": {
- "databases": [
- "str" # Optional. A list of databases to which the
- user should have access. When the database is set to ``admin``"" ,
- the user will have access to all databases based on the user's role
- i.e. a user with the role ``readOnly`` assigned to the ``admin``
- database will have read access to all databases.
- ],
- "role": "str" # Optional. The role to assign to the user
- with each role mapping to a MongoDB built-in role. ``readOnly`` maps to
- a `read
- `_
- role. ``readWrite`` maps to a `readWrite
- `_
- role. ``dbAdmin`` maps to a `dbAdmin
- `_
- role. Known values are: "readOnly", "readWrite", and "dbAdmin".
- },
- "opensearch_acl": [
- {
- "index": "str", # Optional. A regex for matching the
- indexes that this ACL should apply to.
- "permission": "str" # Optional. Permission set
- applied to the ACL. 'read' allows user to read from the index.
- 'write' allows for user to write to the index. 'readwrite' allows for
- both 'read' and 'write' permission. 'deny'(default) restricts user
- from performing any operation over an index. 'admin' allows for
- 'readwrite' as well as any operations to administer the index. Known
- values are: "deny", "admin", "read", "readwrite", and "write".
- }
- ],
- "pg_allow_replication": bool # Optional. For Postgres clusters, set
- to ``true`` for a user with replication rights. This option is not currently
- supported for other database engines.
- }
- }
-
- # response body for status code(s): 201
- response == {
- "user": {
- "name": "str", # The name of a database user. Required.
- "access_cert": "str", # Optional. Access certificate for TLS client
- authentication. (Kafka only).
- "access_key": "str", # Optional. Access key for TLS client
- authentication. (Kafka only).
- "mysql_settings": {
- "auth_plugin": "str" # A string specifying the
- authentication method to be used for connections to the MySQL user
- account. The valid values are ``mysql_native_password`` or
- ``caching_sha2_password``. If excluded when creating a new user, the
- default for the version of MySQL in use will be used. As of MySQL 8.0,
- the default is ``caching_sha2_password``. Required. Known values are:
- "mysql_native_password" and "caching_sha2_password".
- },
- "password": "str", # Optional. A randomly generated password for the
- database user.:code:`
`Requires ``database:view_credentials`` scope.
- "role": "str", # Optional. A string representing the database user's
- role. The value will be either "primary" or "normal". Known values are:
- "primary" and "normal".
- "settings": {
- "acl": [
- {
- "permission": "str", # Permission set
- applied to the ACL. 'consume' allows for messages to be consumed
- from the topic. 'produce' allows for messages to be published to
- the topic. 'produceconsume' allows for both 'consume' and
- 'produce' permission. 'admin' allows for 'produceconsume' as well
- as any operations to administer the topic (delete, update).
- Required. Known values are: "admin", "consume", "produce", and
- "produceconsume".
- "topic": "str", # A regex for matching the
- topic(s) that this ACL should apply to. Required.
- "id": "str" # Optional. An identifier for
- the ACL. Will be computed after the ACL is created/updated.
- }
- ],
- "mongo_user_settings": {
- "databases": [
- "str" # Optional. A list of databases to
- which the user should have access. When the database is set to
- ``admin``"" , the user will have access to all databases based on
- the user's role i.e. a user with the role ``readOnly`` assigned
- to the ``admin`` database will have read access to all databases.
- ],
- "role": "str" # Optional. The role to assign to the
- user with each role mapping to a MongoDB built-in role. ``readOnly``
- maps to a `read
- `_
- role. ``readWrite`` maps to a `readWrite
- `_
- role. ``dbAdmin`` maps to a `dbAdmin
- `_
- role. Known values are: "readOnly", "readWrite", and "dbAdmin".
- },
- "opensearch_acl": [
- {
- "index": "str", # Optional. A regex for
- matching the indexes that this ACL should apply to.
- "permission": "str" # Optional. Permission
- set applied to the ACL. 'read' allows user to read from the
- index. 'write' allows for user to write to the index. 'readwrite'
- allows for both 'read' and 'write' permission. 'deny'(default)
- restricts user from performing any operation over an index.
- 'admin' allows for 'readwrite' as well as any operations to
- administer the index. Known values are: "deny", "admin", "read",
- "readwrite", and "write".
- }
- ],
- "pg_allow_replication": bool # Optional. For Postgres
- clusters, set to ``true`` for a user with replication rights. This option
- is not currently supported for other database engines.
- }
- }
- }
# response body for status code(s): 404
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
@@ -112632,169 +120897,95 @@ async def update_user(
tickets to help identify the issue.
}
"""
+ error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ 401: cast(
+ Type[HttpResponseError],
+ lambda response: ClientAuthenticationError(response=response),
+ ),
+ 429: HttpResponseError,
+ 500: HttpResponseError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
- @overload
- async def update_user(
- self,
- database_cluster_uuid: str,
- username: str,
- body: IO[bytes],
- *,
- content_type: str = "application/json",
- **kwargs: Any
- ) -> JSON:
- # pylint: disable=line-too-long
- """Update a Database User.
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = kwargs.pop("params", {}) or {}
- To update an existing database user, send a PUT request to
- ``/v2/databases/$DATABASE_ID/users/$USERNAME``
- with the desired settings.
+ cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None)
- **Note**\\ : only ``settings`` can be updated via this type of request. If you wish to change
- the name of a user,
- you must recreate a new user.
+ _request = build_databases_delete_connection_pool_request(
+ database_cluster_uuid=database_cluster_uuid,
+ pool_name=pool_name,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
- The response will be a JSON object with a key called ``user``. The value of this will be an
- object that contains the name of the update database user, along with the ``settings`` object
- that
- has been updated.
+ _stream = False
+ pipeline_response: PipelineResponse = (
+ await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ )
- :param database_cluster_uuid: A unique identifier for a database cluster. Required.
- :type database_cluster_uuid: str
- :param username: The name of the database user. Required.
- :type username: str
- :param body: Required.
- :type body: IO[bytes]
- :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
- Default value is "application/json".
- :paramtype content_type: str
- :return: JSON object
- :rtype: JSON
- :raises ~azure.core.exceptions.HttpResponseError:
+ response = pipeline_response.http_response
- Example:
- .. code-block:: python
+ if response.status_code not in [204, 404]:
+ if _stream:
+ await response.read() # Load the body in memory and close the socket
+ map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
+ raise HttpResponseError(response=response)
- # response body for status code(s): 201
- response == {
- "user": {
- "name": "str", # The name of a database user. Required.
- "access_cert": "str", # Optional. Access certificate for TLS client
- authentication. (Kafka only).
- "access_key": "str", # Optional. Access key for TLS client
- authentication. (Kafka only).
- "mysql_settings": {
- "auth_plugin": "str" # A string specifying the
- authentication method to be used for connections to the MySQL user
- account. The valid values are ``mysql_native_password`` or
- ``caching_sha2_password``. If excluded when creating a new user, the
- default for the version of MySQL in use will be used. As of MySQL 8.0,
- the default is ``caching_sha2_password``. Required. Known values are:
- "mysql_native_password" and "caching_sha2_password".
- },
- "password": "str", # Optional. A randomly generated password for the
- database user.:code:`
`Requires ``database:view_credentials`` scope.
- "role": "str", # Optional. A string representing the database user's
- role. The value will be either "primary" or "normal". Known values are:
- "primary" and "normal".
- "settings": {
- "acl": [
- {
- "permission": "str", # Permission set
- applied to the ACL. 'consume' allows for messages to be consumed
- from the topic. 'produce' allows for messages to be published to
- the topic. 'produceconsume' allows for both 'consume' and
- 'produce' permission. 'admin' allows for 'produceconsume' as well
- as any operations to administer the topic (delete, update).
- Required. Known values are: "admin", "consume", "produce", and
- "produceconsume".
- "topic": "str", # A regex for matching the
- topic(s) that this ACL should apply to. Required.
- "id": "str" # Optional. An identifier for
- the ACL. Will be computed after the ACL is created/updated.
- }
- ],
- "mongo_user_settings": {
- "databases": [
- "str" # Optional. A list of databases to
- which the user should have access. When the database is set to
- ``admin``"" , the user will have access to all databases based on
- the user's role i.e. a user with the role ``readOnly`` assigned
- to the ``admin`` database will have read access to all databases.
- ],
- "role": "str" # Optional. The role to assign to the
- user with each role mapping to a MongoDB built-in role. ``readOnly``
- maps to a `read
- `_
- role. ``readWrite`` maps to a `readWrite
- `_
- role. ``dbAdmin`` maps to a `dbAdmin
- `_
- role. Known values are: "readOnly", "readWrite", and "dbAdmin".
- },
- "opensearch_acl": [
- {
- "index": "str", # Optional. A regex for
- matching the indexes that this ACL should apply to.
- "permission": "str" # Optional. Permission
- set applied to the ACL. 'read' allows user to read from the
- index. 'write' allows for user to write to the index. 'readwrite'
- allows for both 'read' and 'write' permission. 'deny'(default)
- restricts user from performing any operation over an index.
- 'admin' allows for 'readwrite' as well as any operations to
- administer the index. Known values are: "deny", "admin", "read",
- "readwrite", and "write".
- }
- ],
- "pg_allow_replication": bool # Optional. For Postgres
- clusters, set to ``true`` for a user with replication rights. This option
- is not currently supported for other database engines.
- }
- }
- }
- # response body for status code(s): 404
- response == {
- "id": "str", # A short identifier corresponding to the HTTP status code
- returned. For example, the ID for a response returning a 404 status code would
- be "not_found.". Required.
- "message": "str", # A message providing additional information about the
- error, including details to help resolve it when possible. Required.
- "request_id": "str" # Optional. Optionally, some endpoints may include a
- request ID that should be provided when reporting bugs or opening support
- tickets to help identify the issue.
- }
- """
+ deserialized = None
+ response_headers = {}
+ if response.status_code == 204:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.status_code == 404:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
@distributed_trace_async
- async def update_user(
- self,
- database_cluster_uuid: str,
- username: str,
- body: Union[JSON, IO[bytes]],
- **kwargs: Any
+ async def get_eviction_policy(
+ self, database_cluster_uuid: str, **kwargs: Any
) -> JSON:
# pylint: disable=line-too-long
- """Update a Database User.
-
- To update an existing database user, send a PUT request to
- ``/v2/databases/$DATABASE_ID/users/$USERNAME``
- with the desired settings.
-
- **Note**\\ : only ``settings`` can be updated via this type of request. If you wish to change
- the name of a user,
- you must recreate a new user.
+ """Retrieve the Eviction Policy for a Caching or Valkey Cluster.
- The response will be a JSON object with a key called ``user``. The value of this will be an
- object that contains the name of the update database user, along with the ``settings`` object
- that
- has been updated.
+ To retrieve the configured eviction policy for an existing Caching or Valkey cluster, send a
+ GET request to ``/v2/databases/$DATABASE_ID/eviction_policy``.
+ The response will be a JSON object with an ``eviction_policy`` key. This will be set to a
+ string representing the eviction policy.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
- :param username: The name of the database user. Required.
- :type username: str
- :param body: Is either a JSON type or a IO[bytes] type. Required.
- :type body: JSON or IO[bytes]
:return: JSON object
:rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
@@ -112802,137 +120993,18 @@ async def update_user(
Example:
.. code-block:: python
- # JSON input template you can fill out and use as your body input.
- body = {
- "settings": {
- "acl": [
- {
- "permission": "str", # Permission set applied to the
- ACL. 'consume' allows for messages to be consumed from the topic.
- 'produce' allows for messages to be published to the topic.
- 'produceconsume' allows for both 'consume' and 'produce' permission.
- 'admin' allows for 'produceconsume' as well as any operations to
- administer the topic (delete, update). Required. Known values are:
- "admin", "consume", "produce", and "produceconsume".
- "topic": "str", # A regex for matching the topic(s)
- that this ACL should apply to. Required.
- "id": "str" # Optional. An identifier for the ACL.
- Will be computed after the ACL is created/updated.
- }
- ],
- "mongo_user_settings": {
- "databases": [
- "str" # Optional. A list of databases to which the
- user should have access. When the database is set to ``admin``"" ,
- the user will have access to all databases based on the user's role
- i.e. a user with the role ``readOnly`` assigned to the ``admin``
- database will have read access to all databases.
- ],
- "role": "str" # Optional. The role to assign to the user
- with each role mapping to a MongoDB built-in role. ``readOnly`` maps to
- a `read
- `_
- role. ``readWrite`` maps to a `readWrite
- `_
- role. ``dbAdmin`` maps to a `dbAdmin
- `_
- role. Known values are: "readOnly", "readWrite", and "dbAdmin".
- },
- "opensearch_acl": [
- {
- "index": "str", # Optional. A regex for matching the
- indexes that this ACL should apply to.
- "permission": "str" # Optional. Permission set
- applied to the ACL. 'read' allows user to read from the index.
- 'write' allows for user to write to the index. 'readwrite' allows for
- both 'read' and 'write' permission. 'deny'(default) restricts user
- from performing any operation over an index. 'admin' allows for
- 'readwrite' as well as any operations to administer the index. Known
- values are: "deny", "admin", "read", "readwrite", and "write".
- }
- ],
- "pg_allow_replication": bool # Optional. For Postgres clusters, set
- to ``true`` for a user with replication rights. This option is not currently
- supported for other database engines.
- }
- }
-
- # response body for status code(s): 201
+ # response body for status code(s): 200
response == {
- "user": {
- "name": "str", # The name of a database user. Required.
- "access_cert": "str", # Optional. Access certificate for TLS client
- authentication. (Kafka only).
- "access_key": "str", # Optional. Access key for TLS client
- authentication. (Kafka only).
- "mysql_settings": {
- "auth_plugin": "str" # A string specifying the
- authentication method to be used for connections to the MySQL user
- account. The valid values are ``mysql_native_password`` or
- ``caching_sha2_password``. If excluded when creating a new user, the
- default for the version of MySQL in use will be used. As of MySQL 8.0,
- the default is ``caching_sha2_password``. Required. Known values are:
- "mysql_native_password" and "caching_sha2_password".
- },
- "password": "str", # Optional. A randomly generated password for the
- database user.:code:`
`Requires ``database:view_credentials`` scope.
- "role": "str", # Optional. A string representing the database user's
- role. The value will be either "primary" or "normal". Known values are:
- "primary" and "normal".
- "settings": {
- "acl": [
- {
- "permission": "str", # Permission set
- applied to the ACL. 'consume' allows for messages to be consumed
- from the topic. 'produce' allows for messages to be published to
- the topic. 'produceconsume' allows for both 'consume' and
- 'produce' permission. 'admin' allows for 'produceconsume' as well
- as any operations to administer the topic (delete, update).
- Required. Known values are: "admin", "consume", "produce", and
- "produceconsume".
- "topic": "str", # A regex for matching the
- topic(s) that this ACL should apply to. Required.
- "id": "str" # Optional. An identifier for
- the ACL. Will be computed after the ACL is created/updated.
- }
- ],
- "mongo_user_settings": {
- "databases": [
- "str" # Optional. A list of databases to
- which the user should have access. When the database is set to
- ``admin``"" , the user will have access to all databases based on
- the user's role i.e. a user with the role ``readOnly`` assigned
- to the ``admin`` database will have read access to all databases.
- ],
- "role": "str" # Optional. The role to assign to the
- user with each role mapping to a MongoDB built-in role. ``readOnly``
- maps to a `read
- `_
- role. ``readWrite`` maps to a `readWrite
- `_
- role. ``dbAdmin`` maps to a `dbAdmin
- `_
- role. Known values are: "readOnly", "readWrite", and "dbAdmin".
- },
- "opensearch_acl": [
- {
- "index": "str", # Optional. A regex for
- matching the indexes that this ACL should apply to.
- "permission": "str" # Optional. Permission
- set applied to the ACL. 'read' allows user to read from the
- index. 'write' allows for user to write to the index. 'readwrite'
- allows for both 'read' and 'write' permission. 'deny'(default)
- restricts user from performing any operation over an index.
- 'admin' allows for 'readwrite' as well as any operations to
- administer the index. Known values are: "deny", "admin", "read",
- "readwrite", and "write".
- }
- ],
- "pg_allow_replication": bool # Optional. For Postgres
- clusters, set to ``true`` for a user with replication rights. This option
- is not currently supported for other database engines.
- }
- }
+ "eviction_policy": "str" # A string specifying the desired eviction policy
+ for a Caching or Valkey cluster. * ``noeviction``"" : Don't evict any data,
+ returns error when memory limit is reached. * ``allkeys_lru:`` Evict any key,
+ least recently used (LRU) first. * ``allkeys_random``"" : Evict keys in a random
+ order. * ``volatile_lru``"" : Evict keys with expiration only, least recently
+ used (LRU) first. * ``volatile_random``"" : Evict keys with expiration only in a
+ random order. * ``volatile_ttl``"" : Evict keys with expiration only, shortest
+ time-to-live (TTL) first. Required. Known values are: "noeviction",
+ "allkeys_lru", "allkeys_random", "volatile_lru", "volatile_random", and
+ "volatile_ttl".
}
# response body for status code(s): 404
response == {
@@ -112959,28 +121031,13 @@ async def update_user(
}
error_map.update(kwargs.pop("error_map", {}) or {})
- _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _headers = kwargs.pop("headers", {}) or {}
_params = kwargs.pop("params", {}) or {}
- content_type: Optional[str] = kwargs.pop(
- "content_type", _headers.pop("Content-Type", None)
- )
cls: ClsType[JSON] = kwargs.pop("cls", None)
- content_type = content_type or "application/json"
- _json = None
- _content = None
- if isinstance(body, (IOBase, bytes)):
- _content = body
- else:
- _json = body
-
- _request = build_databases_update_user_request(
+ _request = build_databases_get_eviction_policy_request(
database_cluster_uuid=database_cluster_uuid,
- username=username,
- content_type=content_type,
- json=_json,
- content=_content,
headers=_headers,
params=_params,
)
@@ -112995,14 +121052,14 @@ async def update_user(
response = pipeline_response.http_response
- if response.status_code not in [201, 404]:
+ if response.status_code not in [200, 404]:
if _stream:
await response.read() # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
raise HttpResponseError(response=response)
response_headers = {}
- if response.status_code == 201:
+ if response.status_code == 200:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
)
@@ -113040,39 +121097,29 @@ async def update_user(
return cast(JSON, deserialized) # type: ignore
@overload
- async def reset_auth(
+ async def update_eviction_policy(
self,
database_cluster_uuid: str,
- username: str,
body: JSON,
*,
content_type: str = "application/json",
**kwargs: Any
- ) -> JSON:
+ ) -> Optional[JSON]:
# pylint: disable=line-too-long
- """Reset a Database User's Password or Authentication Method.
-
- To reset the password for a database user, send a POST request to
- ``/v2/databases/$DATABASE_ID/users/$USERNAME/reset_auth``.
-
- For ``mysql`` databases, the authentication method can be specifying by
- including a key in the JSON body called ``mysql_settings`` with the ``auth_plugin``
- value specified.
+ """Configure the Eviction Policy for a Caching or Valkey Cluster.
- The response will be a JSON object with a ``user`` key. This will be set to an
- object containing the standard database user attributes.
+ To configure an eviction policy for an existing Caching or Valkey cluster, send a PUT request
+ to ``/v2/databases/$DATABASE_ID/eviction_policy`` specifying the desired policy.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
- :param username: The name of the database user. Required.
- :type username: str
:param body: Required.
:type body: JSON
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :return: JSON object
- :rtype: JSON
+ :return: JSON object or None
+ :rtype: JSON or None
:raises ~azure.core.exceptions.HttpResponseError:
Example:
@@ -113080,93 +121127,18 @@ async def reset_auth(
# JSON input template you can fill out and use as your body input.
body = {
- "mysql_settings": {
- "auth_plugin": "str" # A string specifying the authentication method
- to be used for connections to the MySQL user account. The valid values are
- ``mysql_native_password`` or ``caching_sha2_password``. If excluded when
- creating a new user, the default for the version of MySQL in use will be
- used. As of MySQL 8.0, the default is ``caching_sha2_password``. Required.
- Known values are: "mysql_native_password" and "caching_sha2_password".
- }
+ "eviction_policy": "str" # A string specifying the desired eviction policy
+ for a Caching or Valkey cluster. * ``noeviction``"" : Don't evict any data,
+ returns error when memory limit is reached. * ``allkeys_lru:`` Evict any key,
+ least recently used (LRU) first. * ``allkeys_random``"" : Evict keys in a random
+ order. * ``volatile_lru``"" : Evict keys with expiration only, least recently
+ used (LRU) first. * ``volatile_random``"" : Evict keys with expiration only in a
+ random order. * ``volatile_ttl``"" : Evict keys with expiration only, shortest
+ time-to-live (TTL) first. Required. Known values are: "noeviction",
+ "allkeys_lru", "allkeys_random", "volatile_lru", "volatile_random", and
+ "volatile_ttl".
}
- # response body for status code(s): 200
- response == {
- "user": {
- "name": "str", # The name of a database user. Required.
- "access_cert": "str", # Optional. Access certificate for TLS client
- authentication. (Kafka only).
- "access_key": "str", # Optional. Access key for TLS client
- authentication. (Kafka only).
- "mysql_settings": {
- "auth_plugin": "str" # A string specifying the
- authentication method to be used for connections to the MySQL user
- account. The valid values are ``mysql_native_password`` or
- ``caching_sha2_password``. If excluded when creating a new user, the
- default for the version of MySQL in use will be used. As of MySQL 8.0,
- the default is ``caching_sha2_password``. Required. Known values are:
- "mysql_native_password" and "caching_sha2_password".
- },
- "password": "str", # Optional. A randomly generated password for the
- database user.:code:`
`Requires ``database:view_credentials`` scope.
- "role": "str", # Optional. A string representing the database user's
- role. The value will be either "primary" or "normal". Known values are:
- "primary" and "normal".
- "settings": {
- "acl": [
- {
- "permission": "str", # Permission set
- applied to the ACL. 'consume' allows for messages to be consumed
- from the topic. 'produce' allows for messages to be published to
- the topic. 'produceconsume' allows for both 'consume' and
- 'produce' permission. 'admin' allows for 'produceconsume' as well
- as any operations to administer the topic (delete, update).
- Required. Known values are: "admin", "consume", "produce", and
- "produceconsume".
- "topic": "str", # A regex for matching the
- topic(s) that this ACL should apply to. Required.
- "id": "str" # Optional. An identifier for
- the ACL. Will be computed after the ACL is created/updated.
- }
- ],
- "mongo_user_settings": {
- "databases": [
- "str" # Optional. A list of databases to
- which the user should have access. When the database is set to
- ``admin``"" , the user will have access to all databases based on
- the user's role i.e. a user with the role ``readOnly`` assigned
- to the ``admin`` database will have read access to all databases.
- ],
- "role": "str" # Optional. The role to assign to the
- user with each role mapping to a MongoDB built-in role. ``readOnly``
- maps to a `read
- `_
- role. ``readWrite`` maps to a `readWrite
- `_
- role. ``dbAdmin`` maps to a `dbAdmin
- `_
- role. Known values are: "readOnly", "readWrite", and "dbAdmin".
- },
- "opensearch_acl": [
- {
- "index": "str", # Optional. A regex for
- matching the indexes that this ACL should apply to.
- "permission": "str" # Optional. Permission
- set applied to the ACL. 'read' allows user to read from the
- index. 'write' allows for user to write to the index. 'readwrite'
- allows for both 'read' and 'write' permission. 'deny'(default)
- restricts user from performing any operation over an index.
- 'admin' allows for 'readwrite' as well as any operations to
- administer the index. Known values are: "deny", "admin", "read",
- "readwrite", and "write".
- }
- ],
- "pg_allow_replication": bool # Optional. For Postgres
- clusters, set to ``true`` for a user with replication rights. This option
- is not currently supported for other database engines.
- }
- }
- }
# response body for status code(s): 404
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
@@ -113181,121 +121153,34 @@ async def reset_auth(
"""
@overload
- async def reset_auth(
+ async def update_eviction_policy(
self,
database_cluster_uuid: str,
- username: str,
body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
- ) -> JSON:
+ ) -> Optional[JSON]:
# pylint: disable=line-too-long
- """Reset a Database User's Password or Authentication Method.
-
- To reset the password for a database user, send a POST request to
- ``/v2/databases/$DATABASE_ID/users/$USERNAME/reset_auth``.
-
- For ``mysql`` databases, the authentication method can be specifying by
- including a key in the JSON body called ``mysql_settings`` with the ``auth_plugin``
- value specified.
+ """Configure the Eviction Policy for a Caching or Valkey Cluster.
- The response will be a JSON object with a ``user`` key. This will be set to an
- object containing the standard database user attributes.
+ To configure an eviction policy for an existing Caching or Valkey cluster, send a PUT request
+ to ``/v2/databases/$DATABASE_ID/eviction_policy`` specifying the desired policy.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
- :param username: The name of the database user. Required.
- :type username: str
:param body: Required.
:type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :return: JSON object
- :rtype: JSON
+ :return: JSON object or None
+ :rtype: JSON or None
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
- # response body for status code(s): 200
- response == {
- "user": {
- "name": "str", # The name of a database user. Required.
- "access_cert": "str", # Optional. Access certificate for TLS client
- authentication. (Kafka only).
- "access_key": "str", # Optional. Access key for TLS client
- authentication. (Kafka only).
- "mysql_settings": {
- "auth_plugin": "str" # A string specifying the
- authentication method to be used for connections to the MySQL user
- account. The valid values are ``mysql_native_password`` or
- ``caching_sha2_password``. If excluded when creating a new user, the
- default for the version of MySQL in use will be used. As of MySQL 8.0,
- the default is ``caching_sha2_password``. Required. Known values are:
- "mysql_native_password" and "caching_sha2_password".
- },
- "password": "str", # Optional. A randomly generated password for the
- database user.:code:`
`Requires ``database:view_credentials`` scope.
- "role": "str", # Optional. A string representing the database user's
- role. The value will be either "primary" or "normal". Known values are:
- "primary" and "normal".
- "settings": {
- "acl": [
- {
- "permission": "str", # Permission set
- applied to the ACL. 'consume' allows for messages to be consumed
- from the topic. 'produce' allows for messages to be published to
- the topic. 'produceconsume' allows for both 'consume' and
- 'produce' permission. 'admin' allows for 'produceconsume' as well
- as any operations to administer the topic (delete, update).
- Required. Known values are: "admin", "consume", "produce", and
- "produceconsume".
- "topic": "str", # A regex for matching the
- topic(s) that this ACL should apply to. Required.
- "id": "str" # Optional. An identifier for
- the ACL. Will be computed after the ACL is created/updated.
- }
- ],
- "mongo_user_settings": {
- "databases": [
- "str" # Optional. A list of databases to
- which the user should have access. When the database is set to
- ``admin``"" , the user will have access to all databases based on
- the user's role i.e. a user with the role ``readOnly`` assigned
- to the ``admin`` database will have read access to all databases.
- ],
- "role": "str" # Optional. The role to assign to the
- user with each role mapping to a MongoDB built-in role. ``readOnly``
- maps to a `read
- `_
- role. ``readWrite`` maps to a `readWrite
- `_
- role. ``dbAdmin`` maps to a `dbAdmin
- `_
- role. Known values are: "readOnly", "readWrite", and "dbAdmin".
- },
- "opensearch_acl": [
- {
- "index": "str", # Optional. A regex for
- matching the indexes that this ACL should apply to.
- "permission": "str" # Optional. Permission
- set applied to the ACL. 'read' allows user to read from the
- index. 'write' allows for user to write to the index. 'readwrite'
- allows for both 'read' and 'write' permission. 'deny'(default)
- restricts user from performing any operation over an index.
- 'admin' allows for 'readwrite' as well as any operations to
- administer the index. Known values are: "deny", "admin", "read",
- "readwrite", and "write".
- }
- ],
- "pg_allow_replication": bool # Optional. For Postgres
- clusters, set to ``true`` for a user with replication rights. This option
- is not currently supported for other database engines.
- }
- }
- }
# response body for status code(s): 404
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
@@ -113310,128 +121195,40 @@ async def reset_auth(
"""
@distributed_trace_async
- async def reset_auth(
- self,
- database_cluster_uuid: str,
- username: str,
- body: Union[JSON, IO[bytes]],
- **kwargs: Any
- ) -> JSON:
+ async def update_eviction_policy(
+ self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any
+ ) -> Optional[JSON]:
# pylint: disable=line-too-long
- """Reset a Database User's Password or Authentication Method.
-
- To reset the password for a database user, send a POST request to
- ``/v2/databases/$DATABASE_ID/users/$USERNAME/reset_auth``.
-
- For ``mysql`` databases, the authentication method can be specifying by
- including a key in the JSON body called ``mysql_settings`` with the ``auth_plugin``
- value specified.
+ """Configure the Eviction Policy for a Caching or Valkey Cluster.
- The response will be a JSON object with a ``user`` key. This will be set to an
- object containing the standard database user attributes.
+ To configure an eviction policy for an existing Caching or Valkey cluster, send a PUT request
+ to ``/v2/databases/$DATABASE_ID/eviction_policy`` specifying the desired policy.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
- :param username: The name of the database user. Required.
- :type username: str
:param body: Is either a JSON type or a IO[bytes] type. Required.
:type body: JSON or IO[bytes]
- :return: JSON object
- :rtype: JSON
+ :return: JSON object or None
+ :rtype: JSON or None
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
- # JSON input template you can fill out and use as your body input.
- body = {
- "mysql_settings": {
- "auth_plugin": "str" # A string specifying the authentication method
- to be used for connections to the MySQL user account. The valid values are
- ``mysql_native_password`` or ``caching_sha2_password``. If excluded when
- creating a new user, the default for the version of MySQL in use will be
- used. As of MySQL 8.0, the default is ``caching_sha2_password``. Required.
- Known values are: "mysql_native_password" and "caching_sha2_password".
- }
- }
-
- # response body for status code(s): 200
- response == {
- "user": {
- "name": "str", # The name of a database user. Required.
- "access_cert": "str", # Optional. Access certificate for TLS client
- authentication. (Kafka only).
- "access_key": "str", # Optional. Access key for TLS client
- authentication. (Kafka only).
- "mysql_settings": {
- "auth_plugin": "str" # A string specifying the
- authentication method to be used for connections to the MySQL user
- account. The valid values are ``mysql_native_password`` or
- ``caching_sha2_password``. If excluded when creating a new user, the
- default for the version of MySQL in use will be used. As of MySQL 8.0,
- the default is ``caching_sha2_password``. Required. Known values are:
- "mysql_native_password" and "caching_sha2_password".
- },
- "password": "str", # Optional. A randomly generated password for the
- database user.:code:`
`Requires ``database:view_credentials`` scope.
- "role": "str", # Optional. A string representing the database user's
- role. The value will be either "primary" or "normal". Known values are:
- "primary" and "normal".
- "settings": {
- "acl": [
- {
- "permission": "str", # Permission set
- applied to the ACL. 'consume' allows for messages to be consumed
- from the topic. 'produce' allows for messages to be published to
- the topic. 'produceconsume' allows for both 'consume' and
- 'produce' permission. 'admin' allows for 'produceconsume' as well
- as any operations to administer the topic (delete, update).
- Required. Known values are: "admin", "consume", "produce", and
- "produceconsume".
- "topic": "str", # A regex for matching the
- topic(s) that this ACL should apply to. Required.
- "id": "str" # Optional. An identifier for
- the ACL. Will be computed after the ACL is created/updated.
- }
- ],
- "mongo_user_settings": {
- "databases": [
- "str" # Optional. A list of databases to
- which the user should have access. When the database is set to
- ``admin``"" , the user will have access to all databases based on
- the user's role i.e. a user with the role ``readOnly`` assigned
- to the ``admin`` database will have read access to all databases.
- ],
- "role": "str" # Optional. The role to assign to the
- user with each role mapping to a MongoDB built-in role. ``readOnly``
- maps to a `read
- `_
- role. ``readWrite`` maps to a `readWrite
- `_
- role. ``dbAdmin`` maps to a `dbAdmin
- `_
- role. Known values are: "readOnly", "readWrite", and "dbAdmin".
- },
- "opensearch_acl": [
- {
- "index": "str", # Optional. A regex for
- matching the indexes that this ACL should apply to.
- "permission": "str" # Optional. Permission
- set applied to the ACL. 'read' allows user to read from the
- index. 'write' allows for user to write to the index. 'readwrite'
- allows for both 'read' and 'write' permission. 'deny'(default)
- restricts user from performing any operation over an index.
- 'admin' allows for 'readwrite' as well as any operations to
- administer the index. Known values are: "deny", "admin", "read",
- "readwrite", and "write".
- }
- ],
- "pg_allow_replication": bool # Optional. For Postgres
- clusters, set to ``true`` for a user with replication rights. This option
- is not currently supported for other database engines.
- }
- }
+ # JSON input template you can fill out and use as your body input.
+ body = {
+ "eviction_policy": "str" # A string specifying the desired eviction policy
+ for a Caching or Valkey cluster. * ``noeviction``"" : Don't evict any data,
+ returns error when memory limit is reached. * ``allkeys_lru:`` Evict any key,
+ least recently used (LRU) first. * ``allkeys_random``"" : Evict keys in a random
+ order. * ``volatile_lru``"" : Evict keys with expiration only, least recently
+ used (LRU) first. * ``volatile_random``"" : Evict keys with expiration only in a
+ random order. * ``volatile_ttl``"" : Evict keys with expiration only, shortest
+ time-to-live (TTL) first. Required. Known values are: "noeviction",
+ "allkeys_lru", "allkeys_random", "volatile_lru", "volatile_random", and
+ "volatile_ttl".
}
+
# response body for status code(s): 404
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
@@ -113463,7 +121260,7 @@ async def reset_auth(
content_type: Optional[str] = kwargs.pop(
"content_type", _headers.pop("Content-Type", None)
)
- cls: ClsType[JSON] = kwargs.pop("cls", None)
+ cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -113473,9 +121270,8 @@ async def reset_auth(
else:
_json = body
- _request = build_databases_reset_auth_request(
+ _request = build_databases_update_eviction_policy_request(
database_cluster_uuid=database_cluster_uuid,
- username=username,
content_type=content_type,
json=_json,
content=_content,
@@ -113493,14 +121289,15 @@ async def reset_auth(
response = pipeline_response.http_response
- if response.status_code not in [200, 404]:
+ if response.status_code not in [204, 404]:
if _stream:
await response.read() # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
raise HttpResponseError(response=response)
+ deserialized = None
response_headers = {}
- if response.status_code == 200:
+ if response.status_code == 204:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
)
@@ -113511,11 +121308,6 @@ async def reset_auth(
"int", response.headers.get("ratelimit-reset")
)
- if response.content:
- deserialized = response.json()
- else:
- deserialized = None
-
if response.status_code == 404:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
@@ -113533,22 +121325,19 @@ async def reset_auth(
deserialized = None
if cls:
- return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- return cast(JSON, deserialized) # type: ignore
+ return deserialized # type: ignore
@distributed_trace_async
- async def list(self, database_cluster_uuid: str, **kwargs: Any) -> JSON:
+ async def get_sql_mode(self, database_cluster_uuid: str, **kwargs: Any) -> JSON:
# pylint: disable=line-too-long
- """List All Databases.
-
- To list all of the databases in a clusters, send a GET request to
- ``/v2/databases/$DATABASE_ID/dbs``.
-
- The result will be a JSON object with a ``dbs`` key. This will be set to an array
- of database objects, each of which will contain the standard database attributes.
+ """Retrieve the SQL Modes for a MySQL Cluster.
- Note: Database management is not supported for Caching or Valkey clusters.
+ To retrieve the configured SQL modes for an existing MySQL cluster, send a GET request to
+ ``/v2/databases/$DATABASE_ID/sql_mode``.
+ The response will be a JSON object with a ``sql_mode`` key. This will be set to a string
+ representing the configured SQL modes.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
@@ -113561,11 +121350,8 @@ async def list(self, database_cluster_uuid: str, **kwargs: Any) -> JSON:
# response body for status code(s): 200
response == {
- "dbs": [
- {
- "name": "str" # The name of the database. Required.
- }
- ]
+ "sql_mode": "str" # A string specifying the configured SQL modes for the
+ MySQL cluster. Required.
}
# response body for status code(s): 404
response == {
@@ -113597,7 +121383,7 @@ async def list(self, database_cluster_uuid: str, **kwargs: Any) -> JSON:
cls: ClsType[JSON] = kwargs.pop("cls", None)
- _request = build_databases_list_request(
+ _request = build_databases_get_sql_mode_request(
database_cluster_uuid=database_cluster_uuid,
headers=_headers,
params=_params,
@@ -113658,24 +121444,22 @@ async def list(self, database_cluster_uuid: str, **kwargs: Any) -> JSON:
return cast(JSON, deserialized) # type: ignore
@overload
- async def add(
+ async def update_sql_mode(
self,
database_cluster_uuid: str,
body: JSON,
*,
content_type: str = "application/json",
**kwargs: Any
- ) -> JSON:
+ ) -> Optional[JSON]:
# pylint: disable=line-too-long
- """Add a New Database.
-
- To add a new database to an existing cluster, send a POST request to
- ``/v2/databases/$DATABASE_ID/dbs``.
-
- Note: Database management is not supported for Caching or Valkey clusters.
+ """Update SQL Mode for a Cluster.
- The response will be a JSON object with a key called ``db``. The value of this will be
- an object that contains the standard attributes associated with a database.
+ To configure the SQL modes for an existing MySQL cluster, send a PUT request to
+ ``/v2/databases/$DATABASE_ID/sql_mode`` specifying the desired modes. See the official MySQL 8
+ documentation for a `full list of supported SQL modes
+ `_.
+ A successful request will receive a 204 No Content status code with no body in response.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
@@ -113684,8 +121468,8 @@ async def add(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :return: JSON object
- :rtype: JSON
+ :return: JSON object or None
+ :rtype: JSON or None
:raises ~azure.core.exceptions.HttpResponseError:
Example:
@@ -113693,15 +121477,10 @@ async def add(
# JSON input template you can fill out and use as your body input.
body = {
- "name": "str" # The name of the database. Required.
+ "sql_mode": "str" # A string specifying the configured SQL modes for the
+ MySQL cluster. Required.
}
- # response body for status code(s): 201
- response == {
- "db": {
- "name": "str" # The name of the database. Required.
- }
- }
# response body for status code(s): 404
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
@@ -113716,24 +121495,22 @@ async def add(
"""
@overload
- async def add(
+ async def update_sql_mode(
self,
database_cluster_uuid: str,
body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
- ) -> JSON:
+ ) -> Optional[JSON]:
# pylint: disable=line-too-long
- """Add a New Database.
-
- To add a new database to an existing cluster, send a POST request to
- ``/v2/databases/$DATABASE_ID/dbs``.
-
- Note: Database management is not supported for Caching or Valkey clusters.
+ """Update SQL Mode for a Cluster.
- The response will be a JSON object with a key called ``db``. The value of this will be
- an object that contains the standard attributes associated with a database.
+ To configure the SQL modes for an existing MySQL cluster, send a PUT request to
+ ``/v2/databases/$DATABASE_ID/sql_mode`` specifying the desired modes. See the official MySQL 8
+ documentation for a `full list of supported SQL modes
+ `_.
+ A successful request will receive a 204 No Content status code with no body in response.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
@@ -113742,19 +121519,13 @@ async def add(
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :return: JSON object
- :rtype: JSON
+ :return: JSON object or None
+ :rtype: JSON or None
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
- # response body for status code(s): 201
- response == {
- "db": {
- "name": "str" # The name of the database. Required.
- }
- }
# response body for status code(s): 404
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
@@ -113769,26 +121540,24 @@ async def add(
"""
@distributed_trace_async
- async def add(
+ async def update_sql_mode(
self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any
- ) -> JSON:
+ ) -> Optional[JSON]:
# pylint: disable=line-too-long
- """Add a New Database.
-
- To add a new database to an existing cluster, send a POST request to
- ``/v2/databases/$DATABASE_ID/dbs``.
-
- Note: Database management is not supported for Caching or Valkey clusters.
+ """Update SQL Mode for a Cluster.
- The response will be a JSON object with a key called ``db``. The value of this will be
- an object that contains the standard attributes associated with a database.
+ To configure the SQL modes for an existing MySQL cluster, send a PUT request to
+ ``/v2/databases/$DATABASE_ID/sql_mode`` specifying the desired modes. See the official MySQL 8
+ documentation for a `full list of supported SQL modes
+ `_.
+ A successful request will receive a 204 No Content status code with no body in response.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
:param body: Is either a JSON type or a IO[bytes] type. Required.
:type body: JSON or IO[bytes]
- :return: JSON object
- :rtype: JSON
+ :return: JSON object or None
+ :rtype: JSON or None
:raises ~azure.core.exceptions.HttpResponseError:
Example:
@@ -113796,15 +121565,10 @@ async def add(
# JSON input template you can fill out and use as your body input.
body = {
- "name": "str" # The name of the database. Required.
+ "sql_mode": "str" # A string specifying the configured SQL modes for the
+ MySQL cluster. Required.
}
- # response body for status code(s): 201
- response == {
- "db": {
- "name": "str" # The name of the database. Required.
- }
- }
# response body for status code(s): 404
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
@@ -113836,7 +121600,7 @@ async def add(
content_type: Optional[str] = kwargs.pop(
"content_type", _headers.pop("Content-Type", None)
)
- cls: ClsType[JSON] = kwargs.pop("cls", None)
+ cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -113846,7 +121610,7 @@ async def add(
else:
_json = body
- _request = build_databases_add_request(
+ _request = build_databases_update_sql_mode_request(
database_cluster_uuid=database_cluster_uuid,
content_type=content_type,
json=_json,
@@ -113865,14 +121629,15 @@ async def add(
response = pipeline_response.http_response
- if response.status_code not in [201, 404]:
+ if response.status_code not in [204, 404]:
if _stream:
await response.read() # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
raise HttpResponseError(response=response)
+ deserialized = None
response_headers = {}
- if response.status_code == 201:
+ if response.status_code == 204:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
)
@@ -113883,11 +121648,6 @@ async def add(
"int", response.headers.get("ratelimit-reset")
)
- if response.content:
- deserialized = response.json()
- else:
- deserialized = None
-
if response.status_code == 404:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
@@ -113905,42 +121665,46 @@ async def add(
deserialized = None
if cls:
- return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- return cast(JSON, deserialized) # type: ignore
+ return deserialized # type: ignore
- @distributed_trace_async
- async def get(
- self, database_cluster_uuid: str, database_name: str, **kwargs: Any
- ) -> JSON:
+ @overload
+ async def update_major_version(
+ self,
+ database_cluster_uuid: str,
+ body: JSON,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> Optional[JSON]:
# pylint: disable=line-too-long
- """Retrieve an Existing Database.
-
- To show information about an existing database cluster, send a GET request to
- ``/v2/databases/$DATABASE_ID/dbs/$DB_NAME``.
-
- Note: Database management is not supported for Caching or Valkey clusters.
+ """Upgrade Major Version for a Database.
- The response will be a JSON object with a ``db`` key. This will be set to an object
- containing the standard database attributes.
+ To upgrade the major version of a database, send a PUT request to
+ ``/v2/databases/$DATABASE_ID/upgrade``\\ , specifying the target version.
+ A successful request will receive a 204 No Content status code with no body in response.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
- :param database_name: The name of the database. Required.
- :type database_name: str
- :return: JSON object
- :rtype: JSON
+ :param body: Required.
+ :type body: JSON
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: JSON object or None
+ :rtype: JSON or None
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
- # response body for status code(s): 200
- response == {
- "db": {
- "name": "str" # The name of the database. Required.
- }
+ # JSON input template you can fill out and use as your body input.
+ body = {
+ "version": "str" # Optional. A string representing the version of the
+ database engine in use for the cluster.
}
+
# response body for status code(s): 404
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
@@ -113953,104 +121717,65 @@ async def get(
tickets to help identify the issue.
}
"""
- error_map: MutableMapping[int, Type[HttpResponseError]] = {
- 404: ResourceNotFoundError,
- 409: ResourceExistsError,
- 304: ResourceNotModifiedError,
- 401: cast(
- Type[HttpResponseError],
- lambda response: ClientAuthenticationError(response=response),
- ),
- 429: HttpResponseError,
- 500: HttpResponseError,
- }
- error_map.update(kwargs.pop("error_map", {}) or {})
-
- _headers = kwargs.pop("headers", {}) or {}
- _params = kwargs.pop("params", {}) or {}
-
- cls: ClsType[JSON] = kwargs.pop("cls", None)
-
- _request = build_databases_get_request(
- database_cluster_uuid=database_cluster_uuid,
- database_name=database_name,
- headers=_headers,
- params=_params,
- )
- _request.url = self._client.format_url(_request.url)
-
- _stream = False
- pipeline_response: PipelineResponse = (
- await self._client._pipeline.run( # pylint: disable=protected-access
- _request, stream=_stream, **kwargs
- )
- )
-
- response = pipeline_response.http_response
- if response.status_code not in [200, 404]:
- if _stream:
- await response.read() # Load the body in memory and close the socket
- map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
- raise HttpResponseError(response=response)
-
- response_headers = {}
- if response.status_code == 200:
- response_headers["ratelimit-limit"] = self._deserialize(
- "int", response.headers.get("ratelimit-limit")
- )
- response_headers["ratelimit-remaining"] = self._deserialize(
- "int", response.headers.get("ratelimit-remaining")
- )
- response_headers["ratelimit-reset"] = self._deserialize(
- "int", response.headers.get("ratelimit-reset")
- )
-
- if response.content:
- deserialized = response.json()
- else:
- deserialized = None
+ @overload
+ async def update_major_version(
+ self,
+ database_cluster_uuid: str,
+ body: IO[bytes],
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> Optional[JSON]:
+ # pylint: disable=line-too-long
+ """Upgrade Major Version for a Database.
- if response.status_code == 404:
- response_headers["ratelimit-limit"] = self._deserialize(
- "int", response.headers.get("ratelimit-limit")
- )
- response_headers["ratelimit-remaining"] = self._deserialize(
- "int", response.headers.get("ratelimit-remaining")
- )
- response_headers["ratelimit-reset"] = self._deserialize(
- "int", response.headers.get("ratelimit-reset")
- )
+ To upgrade the major version of a database, send a PUT request to
+ ``/v2/databases/$DATABASE_ID/upgrade``\\ , specifying the target version.
+ A successful request will receive a 204 No Content status code with no body in response.
- if response.content:
- deserialized = response.json()
- else:
- deserialized = None
+ :param database_cluster_uuid: A unique identifier for a database cluster. Required.
+ :type database_cluster_uuid: str
+ :param body: Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: JSON object or None
+ :rtype: JSON or None
+ :raises ~azure.core.exceptions.HttpResponseError:
- if cls:
- return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
+ Example:
+ .. code-block:: python
- return cast(JSON, deserialized) # type: ignore
+ # response body for status code(s): 404
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
+ """
@distributed_trace_async
- async def delete(
- self, database_cluster_uuid: str, database_name: str, **kwargs: Any
+ async def update_major_version(
+ self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any
) -> Optional[JSON]:
# pylint: disable=line-too-long
- """Delete a Database.
-
- To delete a specific database, send a DELETE request to
- ``/v2/databases/$DATABASE_ID/dbs/$DB_NAME``.
-
- A status of 204 will be given. This indicates that the request was processed
- successfully, but that no response body is needed.
+ """Upgrade Major Version for a Database.
- Note: Database management is not supported for Caching or Valkey clusters.
+ To upgrade the major version of a database, send a PUT request to
+ ``/v2/databases/$DATABASE_ID/upgrade``\\ , specifying the target version.
+ A successful request will receive a 204 No Content status code with no body in response.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
- :param database_name: The name of the database. Required.
- :type database_name: str
+ :param body: Is either a JSON type or a IO[bytes] type. Required.
+ :type body: JSON or IO[bytes]
:return: JSON object or None
:rtype: JSON or None
:raises ~azure.core.exceptions.HttpResponseError:
@@ -114058,6 +121783,12 @@ async def delete(
Example:
.. code-block:: python
+ # JSON input template you can fill out and use as your body input.
+ body = {
+ "version": "str" # Optional. A string representing the version of the
+ database engine in use for the cluster.
+ }
+
# response body for status code(s): 404
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
@@ -114083,14 +121814,27 @@ async def delete(
}
error_map.update(kwargs.pop("error_map", {}) or {})
- _headers = kwargs.pop("headers", {}) or {}
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = kwargs.pop("params", {}) or {}
+ content_type: Optional[str] = kwargs.pop(
+ "content_type", _headers.pop("Content-Type", None)
+ )
cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None)
- _request = build_databases_delete_request(
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = body
+
+ _request = build_databases_update_major_version_request(
database_cluster_uuid=database_cluster_uuid,
- database_name=database_name,
+ content_type=content_type,
+ json=_json,
+ content=_content,
headers=_headers,
params=_params,
)
@@ -114146,16 +121890,13 @@ async def delete(
return deserialized # type: ignore
@distributed_trace_async
- async def list_connection_pools(
- self, database_cluster_uuid: str, **kwargs: Any
- ) -> JSON:
+ async def get_autoscale(self, database_cluster_uuid: str, **kwargs: Any) -> JSON:
# pylint: disable=line-too-long
- """List Connection Pools (PostgreSQL).
+ """Retrieve Autoscale Configuration for a Database Cluster.
- To list all of the connection pools available to a PostgreSQL database cluster, send a GET
- request to ``/v2/databases/$DATABASE_ID/pools``.
- The result will be a JSON object with a ``pools`` key. This will be set to an array of
- connection pool objects.
+ To retrieve the autoscale configuration for an existing database cluster, send a GET request to
+ ``/v2/databases/$DATABASE_ID/autoscale``.
+ The response will be a JSON object with autoscaling configuration details.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
@@ -114168,111 +121909,17 @@ async def list_connection_pools(
# response body for status code(s): 200
response == {
- "pools": [
- {
- "db": "str", # The database for use with the connection
- pool. Required.
- "mode": "str", # The PGBouncer transaction mode for the
- connection pool. The allowed values are session, transaction, and
- statement. Required.
- "name": "str", # A unique name for the connection pool. Must
- be between 3 and 60 characters. Required.
- "size": 0, # The desired size of the PGBouncer connection
- pool. The maximum allowed size is determined by the size of the cluster's
- primary node. 25 backend server connections are allowed for every 1GB of
- RAM. Three are reserved for maintenance. For example, a primary node with
- 1 GB of RAM allows for a maximum of 22 backend server connections while
- one with 4 GB would allow for 97. Note that these are shared across all
- connection pools in a cluster. Required.
- "connection": {
- "database": "str", # Optional. The name of the
- default database.
- "host": "str", # Optional. The FQDN pointing to the
- database cluster's current primary node.
- "password": "str", # Optional. The randomly
- generated password for the default
- user.:code:`
`:code:`
`Requires ``database:view_credentials``
- scope.
- "port": 0, # Optional. The port on which the
- database cluster is listening.
- "ssl": bool, # Optional. A boolean value indicating
- if the connection should be made over SSL.
- "uri": "str", # Optional. A connection string in the
- format accepted by the ``psql`` command. This is provided as a
- convenience and should be able to be constructed by the other
- attributes.
- "user": "str" # Optional. The default user for the
- database.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- },
- "private_connection": {
- "database": "str", # Optional. The name of the
- default database.
- "host": "str", # Optional. The FQDN pointing to the
- database cluster's current primary node.
- "password": "str", # Optional. The randomly
- generated password for the default
- user.:code:`
`:code:`
`Requires ``database:view_credentials``
- scope.
- "port": 0, # Optional. The port on which the
- database cluster is listening.
- "ssl": bool, # Optional. A boolean value indicating
- if the connection should be made over SSL.
- "uri": "str", # Optional. A connection string in the
- format accepted by the ``psql`` command. This is provided as a
- convenience and should be able to be constructed by the other
- attributes.
- "user": "str" # Optional. The default user for the
- database.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- },
- "standby_connection": {
- "database": "str", # Optional. The name of the
- default database.
- "host": "str", # Optional. The FQDN pointing to the
- database cluster's current primary node.
- "password": "str", # Optional. The randomly
- generated password for the default
- user.:code:`
`:code:`
`Requires ``database:view_credentials``
- scope.
- "port": 0, # Optional. The port on which the
- database cluster is listening.
- "ssl": bool, # Optional. A boolean value indicating
- if the connection should be made over SSL.
- "uri": "str", # Optional. A connection string in the
- format accepted by the ``psql`` command. This is provided as a
- convenience and should be able to be constructed by the other
- attributes.
- "user": "str" # Optional. The default user for the
- database.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- },
- "standby_private_connection": {
- "database": "str", # Optional. The name of the
- default database.
- "host": "str", # Optional. The FQDN pointing to the
- database cluster's current primary node.
- "password": "str", # Optional. The randomly
- generated password for the default
- user.:code:`
`:code:`
`Requires ``database:view_credentials``
- scope.
- "port": 0, # Optional. The port on which the
- database cluster is listening.
- "ssl": bool, # Optional. A boolean value indicating
- if the connection should be made over SSL.
- "uri": "str", # Optional. A connection string in the
- format accepted by the ``psql`` command. This is provided as a
- convenience and should be able to be constructed by the other
- attributes.
- "user": "str" # Optional. The default user for the
- database.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- },
- "user": "str" # Optional. The name of the user for use with
- the connection pool. When excluded, all sessions connect to the database
- as the inbound user.
+ "autoscale": {
+ "storage": {
+ "enabled": bool, # Whether storage autoscaling is enabled
+ for the cluster. Required.
+ "increment_gib": 0, # Optional. The amount of additional
+ storage to add (in GiB) when autoscaling is triggered.
+ "threshold_percent": 0 # Optional. The storage usage
+ threshold percentage that triggers autoscaling. When storage usage
+ exceeds this percentage, additional storage will be added automatically.
}
- ]
+ }
}
# response body for status code(s): 404
response == {
@@ -114304,7 +121951,7 @@ async def list_connection_pools(
cls: ClsType[JSON] = kwargs.pop("cls", None)
- _request = build_databases_list_connection_pools_request(
+ _request = build_databases_get_autoscale_request(
database_cluster_uuid=database_cluster_uuid,
headers=_headers,
params=_params,
@@ -114365,28 +122012,20 @@ async def list_connection_pools(
return cast(JSON, deserialized) # type: ignore
@overload
- async def add_connection_pool(
+ async def update_autoscale(
self,
database_cluster_uuid: str,
body: JSON,
*,
content_type: str = "application/json",
**kwargs: Any
- ) -> JSON:
+ ) -> Optional[JSON]:
# pylint: disable=line-too-long
- """Add a New Connection Pool (PostgreSQL).
-
- For PostgreSQL database clusters, connection pools can be used to allow a
- database to share its idle connections. The popular PostgreSQL connection
- pooling utility PgBouncer is used to provide this service. `See here for more information
- `_
- about how and why to use PgBouncer connection pooling including
- details about the available transaction modes.
+ """Configure Autoscale Settings for a Database Cluster.
- To add a new connection pool to a PostgreSQL database cluster, send a POST
- request to ``/v2/databases/$DATABASE_ID/pools`` specifying a name for the pool,
- the user to connect with, the database to connect to, as well as its desired
- size and transaction mode.
+ To configure autoscale settings for an existing database cluster, send a PUT request to
+ ``/v2/databases/$DATABASE_ID/autoscale``\\ , specifying the autoscale configuration.
+ A successful request will receive a 204 No Content status code with no body in response.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
@@ -114395,8 +122034,8 @@ async def add_connection_pool(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :return: JSON object
- :rtype: JSON
+ :return: JSON object or None
+ :rtype: JSON or None
:raises ~azure.core.exceptions.HttpResponseError:
Example:
@@ -114404,191 +122043,18 @@ async def add_connection_pool(
# JSON input template you can fill out and use as your body input.
body = {
- "db": "str", # The database for use with the connection pool. Required.
- "mode": "str", # The PGBouncer transaction mode for the connection pool. The
- allowed values are session, transaction, and statement. Required.
- "name": "str", # A unique name for the connection pool. Must be between 3
- and 60 characters. Required.
- "size": 0, # The desired size of the PGBouncer connection pool. The maximum
- allowed size is determined by the size of the cluster's primary node. 25 backend
- server connections are allowed for every 1GB of RAM. Three are reserved for
- maintenance. For example, a primary node with 1 GB of RAM allows for a maximum of
- 22 backend server connections while one with 4 GB would allow for 97. Note that
- these are shared across all connection pools in a cluster. Required.
- "connection": {
- "database": "str", # Optional. The name of the default database.
- "host": "str", # Optional. The FQDN pointing to the database
- cluster's current primary node.
- "password": "str", # Optional. The randomly generated password for
- the default user.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- "port": 0, # Optional. The port on which the database cluster is
- listening.
- "ssl": bool, # Optional. A boolean value indicating if the
- connection should be made over SSL.
- "uri": "str", # Optional. A connection string in the format accepted
- by the ``psql`` command. This is provided as a convenience and should be able
- to be constructed by the other attributes.
- "user": "str" # Optional. The default user for the
- database.:code:`
`:code:`
`Requires ``database:view_credentials``
- scope.
- },
- "private_connection": {
- "database": "str", # Optional. The name of the default database.
- "host": "str", # Optional. The FQDN pointing to the database
- cluster's current primary node.
- "password": "str", # Optional. The randomly generated password for
- the default user.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- "port": 0, # Optional. The port on which the database cluster is
- listening.
- "ssl": bool, # Optional. A boolean value indicating if the
- connection should be made over SSL.
- "uri": "str", # Optional. A connection string in the format accepted
- by the ``psql`` command. This is provided as a convenience and should be able
- to be constructed by the other attributes.
- "user": "str" # Optional. The default user for the
- database.:code:`
`:code:`
`Requires ``database:view_credentials``
- scope.
- },
- "standby_connection": {
- "database": "str", # Optional. The name of the default database.
- "host": "str", # Optional. The FQDN pointing to the database
- cluster's current primary node.
- "password": "str", # Optional. The randomly generated password for
- the default user.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- "port": 0, # Optional. The port on which the database cluster is
- listening.
- "ssl": bool, # Optional. A boolean value indicating if the
- connection should be made over SSL.
- "uri": "str", # Optional. A connection string in the format accepted
- by the ``psql`` command. This is provided as a convenience and should be able
- to be constructed by the other attributes.
- "user": "str" # Optional. The default user for the
- database.:code:`
`:code:`
`Requires ``database:view_credentials``
- scope.
- },
- "standby_private_connection": {
- "database": "str", # Optional. The name of the default database.
- "host": "str", # Optional. The FQDN pointing to the database
- cluster's current primary node.
- "password": "str", # Optional. The randomly generated password for
- the default user.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- "port": 0, # Optional. The port on which the database cluster is
- listening.
- "ssl": bool, # Optional. A boolean value indicating if the
- connection should be made over SSL.
- "uri": "str", # Optional. A connection string in the format accepted
- by the ``psql`` command. This is provided as a convenience and should be able
- to be constructed by the other attributes.
- "user": "str" # Optional. The default user for the
- database.:code:`
`:code:`
`Requires ``database:view_credentials``
- scope.
- },
- "user": "str" # Optional. The name of the user for use with the connection
- pool. When excluded, all sessions connect to the database as the inbound user.
- }
-
- # response body for status code(s): 201
- response == {
- "pool": {
- "db": "str", # The database for use with the connection pool.
- Required.
- "mode": "str", # The PGBouncer transaction mode for the connection
- pool. The allowed values are session, transaction, and statement. Required.
- "name": "str", # A unique name for the connection pool. Must be
- between 3 and 60 characters. Required.
- "size": 0, # The desired size of the PGBouncer connection pool. The
- maximum allowed size is determined by the size of the cluster's primary node.
- 25 backend server connections are allowed for every 1GB of RAM. Three are
- reserved for maintenance. For example, a primary node with 1 GB of RAM allows
- for a maximum of 22 backend server connections while one with 4 GB would
- allow for 97. Note that these are shared across all connection pools in a
+ "storage": {
+ "enabled": bool, # Whether storage autoscaling is enabled for the
cluster. Required.
- "connection": {
- "database": "str", # Optional. The name of the default
- database.
- "host": "str", # Optional. The FQDN pointing to the database
- cluster's current primary node.
- "password": "str", # Optional. The randomly generated
- password for the default user.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- "port": 0, # Optional. The port on which the database
- cluster is listening.
- "ssl": bool, # Optional. A boolean value indicating if the
- connection should be made over SSL.
- "uri": "str", # Optional. A connection string in the format
- accepted by the ``psql`` command. This is provided as a convenience and
- should be able to be constructed by the other attributes.
- "user": "str" # Optional. The default user for the
- database.:code:`
`:code:`
`Requires ``database:view_credentials``
- scope.
- },
- "private_connection": {
- "database": "str", # Optional. The name of the default
- database.
- "host": "str", # Optional. The FQDN pointing to the database
- cluster's current primary node.
- "password": "str", # Optional. The randomly generated
- password for the default user.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- "port": 0, # Optional. The port on which the database
- cluster is listening.
- "ssl": bool, # Optional. A boolean value indicating if the
- connection should be made over SSL.
- "uri": "str", # Optional. A connection string in the format
- accepted by the ``psql`` command. This is provided as a convenience and
- should be able to be constructed by the other attributes.
- "user": "str" # Optional. The default user for the
- database.:code:`
`:code:`
`Requires ``database:view_credentials``
- scope.
- },
- "standby_connection": {
- "database": "str", # Optional. The name of the default
- database.
- "host": "str", # Optional. The FQDN pointing to the database
- cluster's current primary node.
- "password": "str", # Optional. The randomly generated
- password for the default user.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- "port": 0, # Optional. The port on which the database
- cluster is listening.
- "ssl": bool, # Optional. A boolean value indicating if the
- connection should be made over SSL.
- "uri": "str", # Optional. A connection string in the format
- accepted by the ``psql`` command. This is provided as a convenience and
- should be able to be constructed by the other attributes.
- "user": "str" # Optional. The default user for the
- database.:code:`
`:code:`
`Requires ``database:view_credentials``
- scope.
- },
- "standby_private_connection": {
- "database": "str", # Optional. The name of the default
- database.
- "host": "str", # Optional. The FQDN pointing to the database
- cluster's current primary node.
- "password": "str", # Optional. The randomly generated
- password for the default user.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- "port": 0, # Optional. The port on which the database
- cluster is listening.
- "ssl": bool, # Optional. A boolean value indicating if the
- connection should be made over SSL.
- "uri": "str", # Optional. A connection string in the format
- accepted by the ``psql`` command. This is provided as a convenience and
- should be able to be constructed by the other attributes.
- "user": "str" # Optional. The default user for the
- database.:code:`
`:code:`
`Requires ``database:view_credentials``
- scope.
- },
- "user": "str" # Optional. The name of the user for use with the
- connection pool. When excluded, all sessions connect to the database as the
- inbound user.
+ "increment_gib": 0, # Optional. The amount of additional storage to
+ add (in GiB) when autoscaling is triggered.
+ "threshold_percent": 0 # Optional. The storage usage threshold
+ percentage that triggers autoscaling. When storage usage exceeds this
+ percentage, additional storage will be added automatically.
}
}
- # response body for status code(s): 404
+
+ # response body for status code(s): 404, 422
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
returned. For example, the ID for a response returning a 404 status code would
@@ -114602,28 +122068,20 @@ async def add_connection_pool(
"""
@overload
- async def add_connection_pool(
+ async def update_autoscale(
self,
database_cluster_uuid: str,
body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
- ) -> JSON:
+ ) -> Optional[JSON]:
# pylint: disable=line-too-long
- """Add a New Connection Pool (PostgreSQL).
-
- For PostgreSQL database clusters, connection pools can be used to allow a
- database to share its idle connections. The popular PostgreSQL connection
- pooling utility PgBouncer is used to provide this service. `See here for more information
- `_
- about how and why to use PgBouncer connection pooling including
- details about the available transaction modes.
+ """Configure Autoscale Settings for a Database Cluster.
- To add a new connection pool to a PostgreSQL database cluster, send a POST
- request to ``/v2/databases/$DATABASE_ID/pools`` specifying a name for the pool,
- the user to connect with, the database to connect to, as well as its desired
- size and transaction mode.
+ To configure autoscale settings for an existing database cluster, send a PUT request to
+ ``/v2/databases/$DATABASE_ID/autoscale``\\ , specifying the autoscale configuration.
+ A successful request will receive a 204 No Content status code with no body in response.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
@@ -114632,111 +122090,14 @@ async def add_connection_pool(
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :return: JSON object
- :rtype: JSON
+ :return: JSON object or None
+ :rtype: JSON or None
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
- # response body for status code(s): 201
- response == {
- "pool": {
- "db": "str", # The database for use with the connection pool.
- Required.
- "mode": "str", # The PGBouncer transaction mode for the connection
- pool. The allowed values are session, transaction, and statement. Required.
- "name": "str", # A unique name for the connection pool. Must be
- between 3 and 60 characters. Required.
- "size": 0, # The desired size of the PGBouncer connection pool. The
- maximum allowed size is determined by the size of the cluster's primary node.
- 25 backend server connections are allowed for every 1GB of RAM. Three are
- reserved for maintenance. For example, a primary node with 1 GB of RAM allows
- for a maximum of 22 backend server connections while one with 4 GB would
- allow for 97. Note that these are shared across all connection pools in a
- cluster. Required.
- "connection": {
- "database": "str", # Optional. The name of the default
- database.
- "host": "str", # Optional. The FQDN pointing to the database
- cluster's current primary node.
- "password": "str", # Optional. The randomly generated
- password for the default user.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- "port": 0, # Optional. The port on which the database
- cluster is listening.
- "ssl": bool, # Optional. A boolean value indicating if the
- connection should be made over SSL.
- "uri": "str", # Optional. A connection string in the format
- accepted by the ``psql`` command. This is provided as a convenience and
- should be able to be constructed by the other attributes.
- "user": "str" # Optional. The default user for the
- database.:code:`
`:code:`
`Requires ``database:view_credentials``
- scope.
- },
- "private_connection": {
- "database": "str", # Optional. The name of the default
- database.
- "host": "str", # Optional. The FQDN pointing to the database
- cluster's current primary node.
- "password": "str", # Optional. The randomly generated
- password for the default user.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- "port": 0, # Optional. The port on which the database
- cluster is listening.
- "ssl": bool, # Optional. A boolean value indicating if the
- connection should be made over SSL.
- "uri": "str", # Optional. A connection string in the format
- accepted by the ``psql`` command. This is provided as a convenience and
- should be able to be constructed by the other attributes.
- "user": "str" # Optional. The default user for the
- database.:code:`
`:code:`
`Requires ``database:view_credentials``
- scope.
- },
- "standby_connection": {
- "database": "str", # Optional. The name of the default
- database.
- "host": "str", # Optional. The FQDN pointing to the database
- cluster's current primary node.
- "password": "str", # Optional. The randomly generated
- password for the default user.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- "port": 0, # Optional. The port on which the database
- cluster is listening.
- "ssl": bool, # Optional. A boolean value indicating if the
- connection should be made over SSL.
- "uri": "str", # Optional. A connection string in the format
- accepted by the ``psql`` command. This is provided as a convenience and
- should be able to be constructed by the other attributes.
- "user": "str" # Optional. The default user for the
- database.:code:`
`:code:`
`Requires ``database:view_credentials``
- scope.
- },
- "standby_private_connection": {
- "database": "str", # Optional. The name of the default
- database.
- "host": "str", # Optional. The FQDN pointing to the database
- cluster's current primary node.
- "password": "str", # Optional. The randomly generated
- password for the default user.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- "port": 0, # Optional. The port on which the database
- cluster is listening.
- "ssl": bool, # Optional. A boolean value indicating if the
- connection should be made over SSL.
- "uri": "str", # Optional. A connection string in the format
- accepted by the ``psql`` command. This is provided as a convenience and
- should be able to be constructed by the other attributes.
- "user": "str" # Optional. The default user for the
- database.:code:`
`:code:`
`Requires ``database:view_credentials``
- scope.
- },
- "user": "str" # Optional. The name of the user for use with the
- connection pool. When excluded, all sessions connect to the database as the
- inbound user.
- }
- }
- # response body for status code(s): 404
+ # response body for status code(s): 404, 422
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
returned. For example, the ID for a response returning a 404 status code would
@@ -114750,222 +122111,41 @@ async def add_connection_pool(
"""
@distributed_trace_async
- async def add_connection_pool(
+ async def update_autoscale(
self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any
- ) -> JSON:
+ ) -> Optional[JSON]:
# pylint: disable=line-too-long
- """Add a New Connection Pool (PostgreSQL).
-
- For PostgreSQL database clusters, connection pools can be used to allow a
- database to share its idle connections. The popular PostgreSQL connection
- pooling utility PgBouncer is used to provide this service. `See here for more information
- `_
- about how and why to use PgBouncer connection pooling including
- details about the available transaction modes.
+ """Configure Autoscale Settings for a Database Cluster.
- To add a new connection pool to a PostgreSQL database cluster, send a POST
- request to ``/v2/databases/$DATABASE_ID/pools`` specifying a name for the pool,
- the user to connect with, the database to connect to, as well as its desired
- size and transaction mode.
+ To configure autoscale settings for an existing database cluster, send a PUT request to
+ ``/v2/databases/$DATABASE_ID/autoscale``\\ , specifying the autoscale configuration.
+ A successful request will receive a 204 No Content status code with no body in response.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
:param body: Is either a JSON type or a IO[bytes] type. Required.
:type body: JSON or IO[bytes]
- :return: JSON object
- :rtype: JSON
+ :return: JSON object or None
+ :rtype: JSON or None
:raises ~azure.core.exceptions.HttpResponseError:
- Example:
- .. code-block:: python
-
- # JSON input template you can fill out and use as your body input.
- body = {
- "db": "str", # The database for use with the connection pool. Required.
- "mode": "str", # The PGBouncer transaction mode for the connection pool. The
- allowed values are session, transaction, and statement. Required.
- "name": "str", # A unique name for the connection pool. Must be between 3
- and 60 characters. Required.
- "size": 0, # The desired size of the PGBouncer connection pool. The maximum
- allowed size is determined by the size of the cluster's primary node. 25 backend
- server connections are allowed for every 1GB of RAM. Three are reserved for
- maintenance. For example, a primary node with 1 GB of RAM allows for a maximum of
- 22 backend server connections while one with 4 GB would allow for 97. Note that
- these are shared across all connection pools in a cluster. Required.
- "connection": {
- "database": "str", # Optional. The name of the default database.
- "host": "str", # Optional. The FQDN pointing to the database
- cluster's current primary node.
- "password": "str", # Optional. The randomly generated password for
- the default user.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- "port": 0, # Optional. The port on which the database cluster is
- listening.
- "ssl": bool, # Optional. A boolean value indicating if the
- connection should be made over SSL.
- "uri": "str", # Optional. A connection string in the format accepted
- by the ``psql`` command. This is provided as a convenience and should be able
- to be constructed by the other attributes.
- "user": "str" # Optional. The default user for the
- database.:code:`
`:code:`
`Requires ``database:view_credentials``
- scope.
- },
- "private_connection": {
- "database": "str", # Optional. The name of the default database.
- "host": "str", # Optional. The FQDN pointing to the database
- cluster's current primary node.
- "password": "str", # Optional. The randomly generated password for
- the default user.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- "port": 0, # Optional. The port on which the database cluster is
- listening.
- "ssl": bool, # Optional. A boolean value indicating if the
- connection should be made over SSL.
- "uri": "str", # Optional. A connection string in the format accepted
- by the ``psql`` command. This is provided as a convenience and should be able
- to be constructed by the other attributes.
- "user": "str" # Optional. The default user for the
- database.:code:`
`:code:`
`Requires ``database:view_credentials``
- scope.
- },
- "standby_connection": {
- "database": "str", # Optional. The name of the default database.
- "host": "str", # Optional. The FQDN pointing to the database
- cluster's current primary node.
- "password": "str", # Optional. The randomly generated password for
- the default user.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- "port": 0, # Optional. The port on which the database cluster is
- listening.
- "ssl": bool, # Optional. A boolean value indicating if the
- connection should be made over SSL.
- "uri": "str", # Optional. A connection string in the format accepted
- by the ``psql`` command. This is provided as a convenience and should be able
- to be constructed by the other attributes.
- "user": "str" # Optional. The default user for the
- database.:code:`
`:code:`
`Requires ``database:view_credentials``
- scope.
- },
- "standby_private_connection": {
- "database": "str", # Optional. The name of the default database.
- "host": "str", # Optional. The FQDN pointing to the database
- cluster's current primary node.
- "password": "str", # Optional. The randomly generated password for
- the default user.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- "port": 0, # Optional. The port on which the database cluster is
- listening.
- "ssl": bool, # Optional. A boolean value indicating if the
- connection should be made over SSL.
- "uri": "str", # Optional. A connection string in the format accepted
- by the ``psql`` command. This is provided as a convenience and should be able
- to be constructed by the other attributes.
- "user": "str" # Optional. The default user for the
- database.:code:`
`:code:`
`Requires ``database:view_credentials``
- scope.
- },
- "user": "str" # Optional. The name of the user for use with the connection
- pool. When excluded, all sessions connect to the database as the inbound user.
- }
+ Example:
+ .. code-block:: python
- # response body for status code(s): 201
- response == {
- "pool": {
- "db": "str", # The database for use with the connection pool.
- Required.
- "mode": "str", # The PGBouncer transaction mode for the connection
- pool. The allowed values are session, transaction, and statement. Required.
- "name": "str", # A unique name for the connection pool. Must be
- between 3 and 60 characters. Required.
- "size": 0, # The desired size of the PGBouncer connection pool. The
- maximum allowed size is determined by the size of the cluster's primary node.
- 25 backend server connections are allowed for every 1GB of RAM. Three are
- reserved for maintenance. For example, a primary node with 1 GB of RAM allows
- for a maximum of 22 backend server connections while one with 4 GB would
- allow for 97. Note that these are shared across all connection pools in a
+ # JSON input template you can fill out and use as your body input.
+ body = {
+ "storage": {
+ "enabled": bool, # Whether storage autoscaling is enabled for the
cluster. Required.
- "connection": {
- "database": "str", # Optional. The name of the default
- database.
- "host": "str", # Optional. The FQDN pointing to the database
- cluster's current primary node.
- "password": "str", # Optional. The randomly generated
- password for the default user.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- "port": 0, # Optional. The port on which the database
- cluster is listening.
- "ssl": bool, # Optional. A boolean value indicating if the
- connection should be made over SSL.
- "uri": "str", # Optional. A connection string in the format
- accepted by the ``psql`` command. This is provided as a convenience and
- should be able to be constructed by the other attributes.
- "user": "str" # Optional. The default user for the
- database.:code:`
`:code:`
`Requires ``database:view_credentials``
- scope.
- },
- "private_connection": {
- "database": "str", # Optional. The name of the default
- database.
- "host": "str", # Optional. The FQDN pointing to the database
- cluster's current primary node.
- "password": "str", # Optional. The randomly generated
- password for the default user.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- "port": 0, # Optional. The port on which the database
- cluster is listening.
- "ssl": bool, # Optional. A boolean value indicating if the
- connection should be made over SSL.
- "uri": "str", # Optional. A connection string in the format
- accepted by the ``psql`` command. This is provided as a convenience and
- should be able to be constructed by the other attributes.
- "user": "str" # Optional. The default user for the
- database.:code:`
`:code:`
`Requires ``database:view_credentials``
- scope.
- },
- "standby_connection": {
- "database": "str", # Optional. The name of the default
- database.
- "host": "str", # Optional. The FQDN pointing to the database
- cluster's current primary node.
- "password": "str", # Optional. The randomly generated
- password for the default user.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- "port": 0, # Optional. The port on which the database
- cluster is listening.
- "ssl": bool, # Optional. A boolean value indicating if the
- connection should be made over SSL.
- "uri": "str", # Optional. A connection string in the format
- accepted by the ``psql`` command. This is provided as a convenience and
- should be able to be constructed by the other attributes.
- "user": "str" # Optional. The default user for the
- database.:code:`
`:code:`
`Requires ``database:view_credentials``
- scope.
- },
- "standby_private_connection": {
- "database": "str", # Optional. The name of the default
- database.
- "host": "str", # Optional. The FQDN pointing to the database
- cluster's current primary node.
- "password": "str", # Optional. The randomly generated
- password for the default user.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- "port": 0, # Optional. The port on which the database
- cluster is listening.
- "ssl": bool, # Optional. A boolean value indicating if the
- connection should be made over SSL.
- "uri": "str", # Optional. A connection string in the format
- accepted by the ``psql`` command. This is provided as a convenience and
- should be able to be constructed by the other attributes.
- "user": "str" # Optional. The default user for the
- database.:code:`
`:code:`
`Requires ``database:view_credentials``
- scope.
- },
- "user": "str" # Optional. The name of the user for use with the
- connection pool. When excluded, all sessions connect to the database as the
- inbound user.
+ "increment_gib": 0, # Optional. The amount of additional storage to
+ add (in GiB) when autoscaling is triggered.
+ "threshold_percent": 0 # Optional. The storage usage threshold
+ percentage that triggers autoscaling. When storage usage exceeds this
+ percentage, additional storage will be added automatically.
}
}
- # response body for status code(s): 404
+
+ # response body for status code(s): 404, 422
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
returned. For example, the ID for a response returning a 404 status code would
@@ -114996,7 +122176,7 @@ async def add_connection_pool(
content_type: Optional[str] = kwargs.pop(
"content_type", _headers.pop("Content-Type", None)
)
- cls: ClsType[JSON] = kwargs.pop("cls", None)
+ cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -115006,7 +122186,7 @@ async def add_connection_pool(
else:
_json = body
- _request = build_databases_add_connection_pool_request(
+ _request = build_databases_update_autoscale_request(
database_cluster_uuid=database_cluster_uuid,
content_type=content_type,
json=_json,
@@ -115025,14 +122205,26 @@ async def add_connection_pool(
response = pipeline_response.http_response
- if response.status_code not in [201, 404]:
+ if response.status_code not in [204, 404, 422]:
if _stream:
await response.read() # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
raise HttpResponseError(response=response)
+ deserialized = None
response_headers = {}
- if response.status_code == 201:
+ if response.status_code == 204:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.status_code == 404:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
)
@@ -115048,7 +122240,7 @@ async def add_connection_pool(
else:
deserialized = None
- if response.status_code == 404:
+ if response.status_code == 422:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
)
@@ -115065,25 +122257,24 @@ async def add_connection_pool(
deserialized = None
if cls:
- return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- return cast(JSON, deserialized) # type: ignore
+ return deserialized # type: ignore
@distributed_trace_async
- async def get_connection_pool(
- self, database_cluster_uuid: str, pool_name: str, **kwargs: Any
+ async def list_kafka_topics(
+ self, database_cluster_uuid: str, **kwargs: Any
) -> JSON:
# pylint: disable=line-too-long
- """Retrieve Existing Connection Pool (PostgreSQL).
+ """List Topics for a Kafka Cluster.
- To show information about an existing connection pool for a PostgreSQL database cluster, send a
- GET request to ``/v2/databases/$DATABASE_ID/pools/$POOL_NAME``.
- The response will be a JSON object with a ``pool`` key.
+ To list all of a Kafka cluster's topics, send a GET request to
+ ``/v2/databases/$DATABASE_ID/topics``.
+
+ The result will be a JSON object with a ``topics`` key.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
- :param pool_name: The name used to identify the connection pool. Required.
- :type pool_name: str
:return: JSON object
:rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
@@ -115093,100 +122284,17 @@ async def get_connection_pool(
# response body for status code(s): 200
response == {
- "pool": {
- "db": "str", # The database for use with the connection pool.
- Required.
- "mode": "str", # The PGBouncer transaction mode for the connection
- pool. The allowed values are session, transaction, and statement. Required.
- "name": "str", # A unique name for the connection pool. Must be
- between 3 and 60 characters. Required.
- "size": 0, # The desired size of the PGBouncer connection pool. The
- maximum allowed size is determined by the size of the cluster's primary node.
- 25 backend server connections are allowed for every 1GB of RAM. Three are
- reserved for maintenance. For example, a primary node with 1 GB of RAM allows
- for a maximum of 22 backend server connections while one with 4 GB would
- allow for 97. Note that these are shared across all connection pools in a
- cluster. Required.
- "connection": {
- "database": "str", # Optional. The name of the default
- database.
- "host": "str", # Optional. The FQDN pointing to the database
- cluster's current primary node.
- "password": "str", # Optional. The randomly generated
- password for the default user.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- "port": 0, # Optional. The port on which the database
- cluster is listening.
- "ssl": bool, # Optional. A boolean value indicating if the
- connection should be made over SSL.
- "uri": "str", # Optional. A connection string in the format
- accepted by the ``psql`` command. This is provided as a convenience and
- should be able to be constructed by the other attributes.
- "user": "str" # Optional. The default user for the
- database.:code:`
`:code:`
`Requires ``database:view_credentials``
- scope.
- },
- "private_connection": {
- "database": "str", # Optional. The name of the default
- database.
- "host": "str", # Optional. The FQDN pointing to the database
- cluster's current primary node.
- "password": "str", # Optional. The randomly generated
- password for the default user.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- "port": 0, # Optional. The port on which the database
- cluster is listening.
- "ssl": bool, # Optional. A boolean value indicating if the
- connection should be made over SSL.
- "uri": "str", # Optional. A connection string in the format
- accepted by the ``psql`` command. This is provided as a convenience and
- should be able to be constructed by the other attributes.
- "user": "str" # Optional. The default user for the
- database.:code:`
`:code:`
`Requires ``database:view_credentials``
- scope.
- },
- "standby_connection": {
- "database": "str", # Optional. The name of the default
- database.
- "host": "str", # Optional. The FQDN pointing to the database
- cluster's current primary node.
- "password": "str", # Optional. The randomly generated
- password for the default user.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- "port": 0, # Optional. The port on which the database
- cluster is listening.
- "ssl": bool, # Optional. A boolean value indicating if the
- connection should be made over SSL.
- "uri": "str", # Optional. A connection string in the format
- accepted by the ``psql`` command. This is provided as a convenience and
- should be able to be constructed by the other attributes.
- "user": "str" # Optional. The default user for the
- database.:code:`
`:code:`
`Requires ``database:view_credentials``
- scope.
- },
- "standby_private_connection": {
- "database": "str", # Optional. The name of the default
- database.
- "host": "str", # Optional. The FQDN pointing to the database
- cluster's current primary node.
- "password": "str", # Optional. The randomly generated
- password for the default user.:code:`
`:code:`
`Requires
- ``database:view_credentials`` scope.
- "port": 0, # Optional. The port on which the database
- cluster is listening.
- "ssl": bool, # Optional. A boolean value indicating if the
- connection should be made over SSL.
- "uri": "str", # Optional. A connection string in the format
- accepted by the ``psql`` command. This is provided as a convenience and
- should be able to be constructed by the other attributes.
- "user": "str" # Optional. The default user for the
- database.:code:`
`:code:`
`Requires ``database:view_credentials``
- scope.
- },
- "user": "str" # Optional. The name of the user for use with the
- connection pool. When excluded, all sessions connect to the database as the
- inbound user.
- }
+ "topics": [
+ {
+ "name": "str", # Optional. The name of the Kafka topic.
+ "partition_count": 0, # Optional. The number of partitions
+ available for the topic. On update, this value can only be increased.
+ "replication_factor": 0, # Optional. The number of nodes to
+ replicate data across the cluster.
+ "state": "str" # Optional. The state of the Kafka topic.
+ Known values are: "active", "configuring", "deleting", and "unknown".
+ }
+ ]
}
# response body for status code(s): 404
response == {
@@ -115218,9 +122326,8 @@ async def get_connection_pool(
cls: ClsType[JSON] = kwargs.pop("cls", None)
- _request = build_databases_get_connection_pool_request(
+ _request = build_databases_list_kafka_topics_request(
database_cluster_uuid=database_cluster_uuid,
- pool_name=pool_name,
headers=_headers,
params=_params,
)
@@ -115280,32 +122387,31 @@ async def get_connection_pool(
return cast(JSON, deserialized) # type: ignore
@overload
- async def update_connection_pool(
+ async def create_kafka_topic(
self,
database_cluster_uuid: str,
- pool_name: str,
- body: JSON,
+ body: Optional[JSON] = None,
*,
content_type: str = "application/json",
**kwargs: Any
- ) -> Optional[JSON]:
+ ) -> JSON:
# pylint: disable=line-too-long
- """Update Connection Pools (PostgreSQL).
+ """Create Topic for a Kafka Cluster.
- To update a connection pool for a PostgreSQL database cluster, send a PUT request to
- ``/v2/databases/$DATABASE_ID/pools/$POOL_NAME``.
+ To create a topic attached to a Kafka cluster, send a POST request to
+ ``/v2/databases/$DATABASE_ID/topics``.
+
+ The result will be a JSON object with a ``topic`` key.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
- :param pool_name: The name used to identify the connection pool. Required.
- :type pool_name: str
- :param body: Required.
+ :param body: Default value is None.
:type body: JSON
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :return: JSON object or None
- :rtype: JSON or None
+ :return: JSON object
+ :rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
Example:
@@ -115313,19 +122419,223 @@ async def update_connection_pool(
# JSON input template you can fill out and use as your body input.
body = {
- "db": "str", # The database for use with the connection pool. Required.
- "mode": "str", # The PGBouncer transaction mode for the connection pool. The
- allowed values are session, transaction, and statement. Required.
- "size": 0, # The desired size of the PGBouncer connection pool. The maximum
- allowed size is determined by the size of the cluster's primary node. 25 backend
- server connections are allowed for every 1GB of RAM. Three are reserved for
- maintenance. For example, a primary node with 1 GB of RAM allows for a maximum of
- 22 backend server connections while one with 4 GB would allow for 97. Note that
- these are shared across all connection pools in a cluster. Required.
- "user": "str" # Optional. The name of the user for use with the connection
- pool. When excluded, all sessions connect to the database as the inbound user.
+ "config": {
+ "cleanup_policy": "delete", # Optional. Default value is "delete".
+ The cleanup_policy sets the retention policy to use on log segments. 'delete'
+ will discard old segments when retention time/size limits are reached.
+ 'compact' will enable log compaction, resulting in retention of the latest
+ value for each key. Known values are: "delete", "compact", and
+ "compact_delete".
+ "compression_type": "producer", # Optional. Default value is
+ "producer". The compression_type specifies the compression type of the topic.
+            Known values are: "producer", "gzip", "snappy", "lz4", "zstd", and
+ "uncompressed".
+ "delete_retention_ms": 86400000, # Optional. Default value is
+ 86400000. The delete_retention_ms specifies how long (in ms) to retain delete
+ tombstone markers for topics.
+ "file_delete_delay_ms": 60000, # Optional. Default value is 60000.
+ The file_delete_delay_ms specifies the time (in ms) to wait before deleting a
+ file from the filesystem.
+ "flush_messages": 9223372036854776000, # Optional. Default value is
+ 9223372036854776000. The flush_messages specifies the number of messages to
+ accumulate on a log partition before messages are flushed to disk.
+ "flush_ms": 9223372036854776000, # Optional. Default value is
+ 9223372036854776000. The flush_ms specifies the maximum time (in ms) that a
+ message is kept in memory before being flushed to disk.
+ "index_interval_bytes": 4096, # Optional. Default value is 4096. The
+ index_interval_bytes specifies the number of bytes between entries being
+            added into the offset index.
+ "max_compaction_lag_ms": 9223372036854776000, # Optional. Default
+ value is 9223372036854776000. The max_compaction_lag_ms specifies the maximum
+ amount of time (in ms) that a message will remain uncompacted. This is only
+            applicable if the logs have compaction enabled.
+ "max_message_bytes": 1048588, # Optional. Default value is 1048588.
+            The max_message_bytes specifies the largest record batch size (in bytes)
+ that can be sent to the server. This is calculated after compression if
+ compression is enabled.
+ "message_down_conversion_enable": True, # Optional. Default value is
+ True. The message_down_conversion_enable specifies whether down-conversion of
+ message formats is enabled to satisfy consumer requests. When 'false', the
+ broker will not perform conversion for consumers expecting older message
+ formats. The broker will respond with an ``UNSUPPORTED_VERSION`` error for
+ consume requests from these older clients.
+ "message_format_version": "3.0-IV1", # Optional. Default value is
+ "3.0-IV1". The message_format_version specifies the message format version
+ used by the broker to append messages to the logs. The value of this setting
+ is assumed to be 3.0-IV1 if the broker protocol version is 3.0 or higher. By
+ setting a particular message format version, all existing messages on disk
+ must be smaller or equal to the specified version. Known values are: "0.8.0",
+ "0.8.1", "0.8.2", "0.9.0", "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0",
+ "0.10.1-IV1", "0.10.1-IV2", "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1",
+ "0.11.0-IV2", "1.0-IV0", "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0",
+ "2.1-IV1", "2.1-IV2", "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0",
+ "2.4-IV1", "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0",
+ "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", "3.3-IV1",
+ "3.3-IV2", and "3.3-IV3".
+ "message_timestamp_type": "create_time", # Optional. Default value
+ is "create_time". The message_timestamp_type specifies whether to use the
+ message create time or log append time as the timestamp on a message. Known
+ values are: "create_time" and "log_append_time".
+ "min_cleanable_dirty_ratio": 0.5, # Optional. Default value is 0.5.
+ The min_cleanable_dirty_ratio specifies the frequency of log compaction (if
+ enabled) in relation to duplicates present in the logs. For example, at 0.5,
+ at most 50% of the log could be duplicates before compaction would begin.
+ "min_compaction_lag_ms": 0, # Optional. Default value is 0. The
+ min_compaction_lag_ms specifies the minimum time (in ms) that a message will
+ remain uncompacted in the log. Only relevant if log compaction is enabled.
+ "min_insync_replicas": 1, # Optional. Default value is 1. The
+ min_insync_replicas specifies the number of replicas that must ACK a write
+ for the write to be considered successful.
+ "preallocate": False, # Optional. Default value is False. The
+ preallocate specifies whether a file should be preallocated on disk when
+ creating a new log segment.
+ "retention_bytes": -1, # Optional. Default value is -1. The
+ retention_bytes specifies the maximum size of the log (in bytes) before
+ deleting messages. -1 indicates that there is no limit.
+ "retention_ms": 604800000, # Optional. Default value is 604800000.
+ The retention_ms specifies the maximum amount of time (in ms) to keep a
+ message before deleting it.
+ "segment_bytes": 209715200, # Optional. Default value is 209715200.
+ The segment_bytes specifies the maximum size of a single log file (in bytes).
+ "segment_jitter_ms": 0, # Optional. Default value is 0. The
+ segment_jitter_ms specifies the maximum random jitter subtracted from the
+ scheduled segment roll time to avoid thundering herds of segment rolling.
+ "segment_ms": 604800000 # Optional. Default value is 604800000. The
+ segment_ms specifies the period of time after which the log will be forced to
+ roll if the segment file isn't full. This ensures that retention can delete
+ or compact old data.
+ },
+ "name": "str", # Optional. The name of the Kafka topic.
+ "partition_count": 0, # Optional. The number of partitions available for the
+ topic. On update, this value can only be increased.
+ "replication_factor": 0 # Optional. The number of nodes to replicate data
+ across the cluster.
}
+ # response body for status code(s): 201
+ response == {
+ "topic": {
+ "config": {
+ "cleanup_policy": "delete", # Optional. Default value is
+ "delete". The cleanup_policy sets the retention policy to use on log
+ segments. 'delete' will discard old segments when retention time/size
+ limits are reached. 'compact' will enable log compaction, resulting in
+ retention of the latest value for each key. Known values are: "delete",
+ "compact", and "compact_delete".
+ "compression_type": "producer", # Optional. Default value is
+ "producer". The compression_type specifies the compression type of the
+                    topic. Known values are: "producer", "gzip", "snappy", "lz4", "zstd", and
+ "uncompressed".
+ "delete_retention_ms": 86400000, # Optional. Default value
+ is 86400000. The delete_retention_ms specifies how long (in ms) to retain
+ delete tombstone markers for topics.
+ "file_delete_delay_ms": 60000, # Optional. Default value is
+ 60000. The file_delete_delay_ms specifies the time (in ms) to wait before
+ deleting a file from the filesystem.
+ "flush_messages": 9223372036854776000, # Optional. Default
+ value is 9223372036854776000. The flush_messages specifies the number of
+ messages to accumulate on a log partition before messages are flushed to
+ disk.
+ "flush_ms": 9223372036854776000, # Optional. Default value
+ is 9223372036854776000. The flush_ms specifies the maximum time (in ms)
+ that a message is kept in memory before being flushed to disk.
+ "index_interval_bytes": 4096, # Optional. Default value is
+ 4096. The index_interval_bytes specifies the number of bytes between
+                    entries being added into the offset index.
+ "max_compaction_lag_ms": 9223372036854776000, # Optional.
+ Default value is 9223372036854776000. The max_compaction_lag_ms specifies
+ the maximum amount of time (in ms) that a message will remain
+                    uncompacted. This is only applicable if the logs have compaction
+ enabled.
+ "max_message_bytes": 1048588, # Optional. Default value is
+                    1048588. The max_message_bytes specifies the largest record batch size
+ (in bytes) that can be sent to the server. This is calculated after
+ compression if compression is enabled.
+ "message_down_conversion_enable": True, # Optional. Default
+ value is True. The message_down_conversion_enable specifies whether
+ down-conversion of message formats is enabled to satisfy consumer
+ requests. When 'false', the broker will not perform conversion for
+ consumers expecting older message formats. The broker will respond with
+ an ``UNSUPPORTED_VERSION`` error for consume requests from these older
+ clients.
+ "message_format_version": "3.0-IV1", # Optional. Default
+ value is "3.0-IV1". The message_format_version specifies the message
+ format version used by the broker to append messages to the logs. The
+ value of this setting is assumed to be 3.0-IV1 if the broker protocol
+ version is 3.0 or higher. By setting a particular message format
+ version, all existing messages on disk must be smaller or equal to the
+ specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0",
+ "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2",
+ "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0",
+ "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2",
+ "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1",
+ "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0",
+ "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0",
+ "3.3-IV1", "3.3-IV2", and "3.3-IV3".
+ "message_timestamp_type": "create_time", # Optional. Default
+ value is "create_time". The message_timestamp_type specifies whether to
+ use the message create time or log append time as the timestamp on a
+ message. Known values are: "create_time" and "log_append_time".
+ "min_cleanable_dirty_ratio": 0.5, # Optional. Default value
+ is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log
+ compaction (if enabled) in relation to duplicates present in the logs.
+ For example, at 0.5, at most 50% of the log could be duplicates before
+ compaction would begin.
+ "min_compaction_lag_ms": 0, # Optional. Default value is 0.
+ The min_compaction_lag_ms specifies the minimum time (in ms) that a
+ message will remain uncompacted in the log. Only relevant if log
+ compaction is enabled.
+ "min_insync_replicas": 1, # Optional. Default value is 1.
+ The min_insync_replicas specifies the number of replicas that must ACK a
+ write for the write to be considered successful.
+ "preallocate": False, # Optional. Default value is False.
+ The preallocate specifies whether a file should be preallocated on disk
+ when creating a new log segment.
+ "retention_bytes": -1, # Optional. Default value is -1. The
+ retention_bytes specifies the maximum size of the log (in bytes) before
+ deleting messages. -1 indicates that there is no limit.
+ "retention_ms": 604800000, # Optional. Default value is
+ 604800000. The retention_ms specifies the maximum amount of time (in ms)
+ to keep a message before deleting it.
+ "segment_bytes": 209715200, # Optional. Default value is
+ 209715200. The segment_bytes specifies the maximum size of a single log
+ file (in bytes).
+ "segment_jitter_ms": 0, # Optional. Default value is 0. The
+ segment_jitter_ms specifies the maximum random jitter subtracted from the
+ scheduled segment roll time to avoid thundering herds of segment rolling.
+ "segment_ms": 604800000 # Optional. Default value is
+ 604800000. The segment_ms specifies the period of time after which the
+ log will be forced to roll if the segment file isn't full. This ensures
+ that retention can delete or compact old data.
+ },
+ "name": "str", # Optional. The name of the Kafka topic.
+ "partitions": [
+ {
+ "consumer_groups": [
+ {
+ "group_name": "str", # Optional.
+ Name of the consumer group.
+ "offset": 0 # Optional. The current
+ offset of the consumer group.
+ }
+ ],
+ "earliest_offset": 0, # Optional. The earliest
+ consumer offset amongst consumer groups.
+ "id": 0, # Optional. An identifier for the
+ partition.
+ "in_sync_replicas": 0, # Optional. The number of
+ nodes that are in-sync (have the latest data) for the given
+ partition.
+ "size": 0 # Optional. Size of the topic partition in
+ bytes.
+ }
+ ],
+ "replication_factor": 0, # Optional. The number of nodes to
+ replicate data across the cluster.
+ "state": "str" # Optional. The state of the Kafka topic. Known
+ values are: "active", "configuring", "deleting", and "unknown".
+ }
+ }
# response body for status code(s): 404
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
@@ -115340,37 +122650,160 @@ async def update_connection_pool(
"""
@overload
- async def update_connection_pool(
+ async def create_kafka_topic(
self,
database_cluster_uuid: str,
- pool_name: str,
- body: IO[bytes],
+ body: Optional[IO[bytes]] = None,
*,
content_type: str = "application/json",
**kwargs: Any
- ) -> Optional[JSON]:
+ ) -> JSON:
# pylint: disable=line-too-long
- """Update Connection Pools (PostgreSQL).
+ """Create Topic for a Kafka Cluster.
- To update a connection pool for a PostgreSQL database cluster, send a PUT request to
- ``/v2/databases/$DATABASE_ID/pools/$POOL_NAME``.
+ To create a topic attached to a Kafka cluster, send a POST request to
+ ``/v2/databases/$DATABASE_ID/topics``.
+
+ The result will be a JSON object with a ``topic`` key.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
- :param pool_name: The name used to identify the connection pool. Required.
- :type pool_name: str
- :param body: Required.
+ :param body: Default value is None.
:type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :return: JSON object or None
- :rtype: JSON or None
+ :return: JSON object
+ :rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
+ # response body for status code(s): 201
+ response == {
+ "topic": {
+ "config": {
+ "cleanup_policy": "delete", # Optional. Default value is
+ "delete". The cleanup_policy sets the retention policy to use on log
+ segments. 'delete' will discard old segments when retention time/size
+ limits are reached. 'compact' will enable log compaction, resulting in
+ retention of the latest value for each key. Known values are: "delete",
+ "compact", and "compact_delete".
+ "compression_type": "producer", # Optional. Default value is
+ "producer". The compression_type specifies the compression type of the
+                    topic. Known values are: "producer", "gzip", "snappy", "lz4", "zstd", and
+ "uncompressed".
+ "delete_retention_ms": 86400000, # Optional. Default value
+ is 86400000. The delete_retention_ms specifies how long (in ms) to retain
+ delete tombstone markers for topics.
+ "file_delete_delay_ms": 60000, # Optional. Default value is
+ 60000. The file_delete_delay_ms specifies the time (in ms) to wait before
+ deleting a file from the filesystem.
+ "flush_messages": 9223372036854776000, # Optional. Default
+ value is 9223372036854776000. The flush_messages specifies the number of
+ messages to accumulate on a log partition before messages are flushed to
+ disk.
+ "flush_ms": 9223372036854776000, # Optional. Default value
+ is 9223372036854776000. The flush_ms specifies the maximum time (in ms)
+ that a message is kept in memory before being flushed to disk.
+ "index_interval_bytes": 4096, # Optional. Default value is
+ 4096. The index_interval_bytes specifies the number of bytes between
+                    entries being added into the offset index.
+ "max_compaction_lag_ms": 9223372036854776000, # Optional.
+ Default value is 9223372036854776000. The max_compaction_lag_ms specifies
+ the maximum amount of time (in ms) that a message will remain
+                    uncompacted. This is only applicable if the logs have compaction
+ enabled.
+ "max_message_bytes": 1048588, # Optional. Default value is
+                    1048588. The max_message_bytes specifies the largest record batch size
+ (in bytes) that can be sent to the server. This is calculated after
+ compression if compression is enabled.
+ "message_down_conversion_enable": True, # Optional. Default
+ value is True. The message_down_conversion_enable specifies whether
+ down-conversion of message formats is enabled to satisfy consumer
+ requests. When 'false', the broker will not perform conversion for
+ consumers expecting older message formats. The broker will respond with
+ an ``UNSUPPORTED_VERSION`` error for consume requests from these older
+ clients.
+ "message_format_version": "3.0-IV1", # Optional. Default
+ value is "3.0-IV1". The message_format_version specifies the message
+ format version used by the broker to append messages to the logs. The
+ value of this setting is assumed to be 3.0-IV1 if the broker protocol
+ version is 3.0 or higher. By setting a particular message format
+ version, all existing messages on disk must be smaller or equal to the
+ specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0",
+ "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2",
+ "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0",
+ "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2",
+ "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1",
+ "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0",
+ "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0",
+ "3.3-IV1", "3.3-IV2", and "3.3-IV3".
+ "message_timestamp_type": "create_time", # Optional. Default
+ value is "create_time". The message_timestamp_type specifies whether to
+ use the message create time or log append time as the timestamp on a
+ message. Known values are: "create_time" and "log_append_time".
+ "min_cleanable_dirty_ratio": 0.5, # Optional. Default value
+ is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log
+ compaction (if enabled) in relation to duplicates present in the logs.
+ For example, at 0.5, at most 50% of the log could be duplicates before
+ compaction would begin.
+ "min_compaction_lag_ms": 0, # Optional. Default value is 0.
+ The min_compaction_lag_ms specifies the minimum time (in ms) that a
+ message will remain uncompacted in the log. Only relevant if log
+ compaction is enabled.
+ "min_insync_replicas": 1, # Optional. Default value is 1.
+ The min_insync_replicas specifies the number of replicas that must ACK a
+ write for the write to be considered successful.
+ "preallocate": False, # Optional. Default value is False.
+ The preallocate specifies whether a file should be preallocated on disk
+ when creating a new log segment.
+ "retention_bytes": -1, # Optional. Default value is -1. The
+ retention_bytes specifies the maximum size of the log (in bytes) before
+ deleting messages. -1 indicates that there is no limit.
+ "retention_ms": 604800000, # Optional. Default value is
+ 604800000. The retention_ms specifies the maximum amount of time (in ms)
+ to keep a message before deleting it.
+ "segment_bytes": 209715200, # Optional. Default value is
+ 209715200. The segment_bytes specifies the maximum size of a single log
+ file (in bytes).
+ "segment_jitter_ms": 0, # Optional. Default value is 0. The
+ segment_jitter_ms specifies the maximum random jitter subtracted from the
+ scheduled segment roll time to avoid thundering herds of segment rolling.
+ "segment_ms": 604800000 # Optional. Default value is
+ 604800000. The segment_ms specifies the period of time after which the
+ log will be forced to roll if the segment file isn't full. This ensures
+ that retention can delete or compact old data.
+ },
+ "name": "str", # Optional. The name of the Kafka topic.
+ "partitions": [
+ {
+ "consumer_groups": [
+ {
+ "group_name": "str", # Optional.
+ Name of the consumer group.
+ "offset": 0 # Optional. The current
+ offset of the consumer group.
+ }
+ ],
+ "earliest_offset": 0, # Optional. The earliest
+ consumer offset amongst consumer groups.
+ "id": 0, # Optional. An identifier for the
+ partition.
+ "in_sync_replicas": 0, # Optional. The number of
+ nodes that are in-sync (have the latest data) for the given
+ partition.
+ "size": 0 # Optional. Size of the topic partition in
+ bytes.
+ }
+ ],
+ "replication_factor": 0, # Optional. The number of nodes to
+ replicate data across the cluster.
+ "state": "str" # Optional. The state of the Kafka topic. Known
+ values are: "active", "configuring", "deleting", and "unknown".
+ }
+ }
# response body for status code(s): 404
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
@@ -115385,27 +122818,26 @@ async def update_connection_pool(
"""
@distributed_trace_async
- async def update_connection_pool(
+ async def create_kafka_topic(
self,
database_cluster_uuid: str,
- pool_name: str,
- body: Union[JSON, IO[bytes]],
+ body: Optional[Union[JSON, IO[bytes]]] = None,
**kwargs: Any
- ) -> Optional[JSON]:
+ ) -> JSON:
# pylint: disable=line-too-long
- """Update Connection Pools (PostgreSQL).
+ """Create Topic for a Kafka Cluster.
- To update a connection pool for a PostgreSQL database cluster, send a PUT request to
- ``/v2/databases/$DATABASE_ID/pools/$POOL_NAME``.
+ To create a topic attached to a Kafka cluster, send a POST request to
+ ``/v2/databases/$DATABASE_ID/topics``.
+
+ The result will be a JSON object with a ``topic`` key.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
- :param pool_name: The name used to identify the connection pool. Required.
- :type pool_name: str
- :param body: Is either a JSON type or a IO[bytes] type. Required.
+ :param body: Is either a JSON type or a IO[bytes] type. Default value is None.
:type body: JSON or IO[bytes]
- :return: JSON object or None
- :rtype: JSON or None
+ :return: JSON object
+ :rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
Example:
@@ -115413,19 +122845,223 @@ async def update_connection_pool(
# JSON input template you can fill out and use as your body input.
body = {
- "db": "str", # The database for use with the connection pool. Required.
- "mode": "str", # The PGBouncer transaction mode for the connection pool. The
- allowed values are session, transaction, and statement. Required.
- "size": 0, # The desired size of the PGBouncer connection pool. The maximum
- allowed size is determined by the size of the cluster's primary node. 25 backend
- server connections are allowed for every 1GB of RAM. Three are reserved for
- maintenance. For example, a primary node with 1 GB of RAM allows for a maximum of
- 22 backend server connections while one with 4 GB would allow for 97. Note that
- these are shared across all connection pools in a cluster. Required.
- "user": "str" # Optional. The name of the user for use with the connection
- pool. When excluded, all sessions connect to the database as the inbound user.
+ "config": {
+ "cleanup_policy": "delete", # Optional. Default value is "delete".
+ The cleanup_policy sets the retention policy to use on log segments. 'delete'
+ will discard old segments when retention time/size limits are reached.
+ 'compact' will enable log compaction, resulting in retention of the latest
+ value for each key. Known values are: "delete", "compact", and
+ "compact_delete".
+ "compression_type": "producer", # Optional. Default value is
+ "producer". The compression_type specifies the compression type of the topic.
+ Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and
+ "uncompressed".
+ "delete_retention_ms": 86400000, # Optional. Default value is
+ 86400000. The delete_retention_ms specifies how long (in ms) to retain delete
+ tombstone markers for topics.
+ "file_delete_delay_ms": 60000, # Optional. Default value is 60000.
+ The file_delete_delay_ms specifies the time (in ms) to wait before deleting a
+ file from the filesystem.
+ "flush_messages": 9223372036854776000, # Optional. Default value is
+ 9223372036854776000. The flush_messages specifies the number of messages to
+ accumulate on a log partition before messages are flushed to disk.
+ "flush_ms": 9223372036854776000, # Optional. Default value is
+ 9223372036854776000. The flush_ms specifies the maximum time (in ms) that a
+ message is kept in memory before being flushed to disk.
+ "index_interval_bytes": 4096, # Optional. Default value is 4096. The
+ index_interval_bytes specifies the number of bytes between entries being
+                 added into the offset index.
+ "max_compaction_lag_ms": 9223372036854776000, # Optional. Default
+ value is 9223372036854776000. The max_compaction_lag_ms specifies the maximum
+ amount of time (in ms) that a message will remain uncompacted. This is only
+                 applicable if the logs have compaction enabled.
+ "max_message_bytes": 1048588, # Optional. Default value is 1048588.
+                 The max_message_bytes specifies the largest record batch size (in bytes)
+ that can be sent to the server. This is calculated after compression if
+ compression is enabled.
+ "message_down_conversion_enable": True, # Optional. Default value is
+ True. The message_down_conversion_enable specifies whether down-conversion of
+ message formats is enabled to satisfy consumer requests. When 'false', the
+ broker will not perform conversion for consumers expecting older message
+ formats. The broker will respond with an ``UNSUPPORTED_VERSION`` error for
+ consume requests from these older clients.
+ "message_format_version": "3.0-IV1", # Optional. Default value is
+ "3.0-IV1". The message_format_version specifies the message format version
+ used by the broker to append messages to the logs. The value of this setting
+ is assumed to be 3.0-IV1 if the broker protocol version is 3.0 or higher. By
+ setting a particular message format version, all existing messages on disk
+ must be smaller or equal to the specified version. Known values are: "0.8.0",
+ "0.8.1", "0.8.2", "0.9.0", "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0",
+ "0.10.1-IV1", "0.10.1-IV2", "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1",
+ "0.11.0-IV2", "1.0-IV0", "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0",
+ "2.1-IV1", "2.1-IV2", "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0",
+ "2.4-IV1", "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0",
+ "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", "3.3-IV1",
+ "3.3-IV2", and "3.3-IV3".
+ "message_timestamp_type": "create_time", # Optional. Default value
+ is "create_time". The message_timestamp_type specifies whether to use the
+ message create time or log append time as the timestamp on a message. Known
+ values are: "create_time" and "log_append_time".
+ "min_cleanable_dirty_ratio": 0.5, # Optional. Default value is 0.5.
+ The min_cleanable_dirty_ratio specifies the frequency of log compaction (if
+ enabled) in relation to duplicates present in the logs. For example, at 0.5,
+ at most 50% of the log could be duplicates before compaction would begin.
+ "min_compaction_lag_ms": 0, # Optional. Default value is 0. The
+ min_compaction_lag_ms specifies the minimum time (in ms) that a message will
+ remain uncompacted in the log. Only relevant if log compaction is enabled.
+ "min_insync_replicas": 1, # Optional. Default value is 1. The
+ min_insync_replicas specifies the number of replicas that must ACK a write
+ for the write to be considered successful.
+ "preallocate": False, # Optional. Default value is False. The
+ preallocate specifies whether a file should be preallocated on disk when
+ creating a new log segment.
+ "retention_bytes": -1, # Optional. Default value is -1. The
+ retention_bytes specifies the maximum size of the log (in bytes) before
+ deleting messages. -1 indicates that there is no limit.
+ "retention_ms": 604800000, # Optional. Default value is 604800000.
+ The retention_ms specifies the maximum amount of time (in ms) to keep a
+ message before deleting it.
+ "segment_bytes": 209715200, # Optional. Default value is 209715200.
+ The segment_bytes specifies the maximum size of a single log file (in bytes).
+ "segment_jitter_ms": 0, # Optional. Default value is 0. The
+ segment_jitter_ms specifies the maximum random jitter subtracted from the
+ scheduled segment roll time to avoid thundering herds of segment rolling.
+ "segment_ms": 604800000 # Optional. Default value is 604800000. The
+ segment_ms specifies the period of time after which the log will be forced to
+ roll if the segment file isn't full. This ensures that retention can delete
+ or compact old data.
+ },
+ "name": "str", # Optional. The name of the Kafka topic.
+ "partition_count": 0, # Optional. The number of partitions available for the
+ topic. On update, this value can only be increased.
+ "replication_factor": 0 # Optional. The number of nodes to replicate data
+ across the cluster.
}
+ # response body for status code(s): 201
+ response == {
+ "topic": {
+ "config": {
+ "cleanup_policy": "delete", # Optional. Default value is
+ "delete". The cleanup_policy sets the retention policy to use on log
+ segments. 'delete' will discard old segments when retention time/size
+ limits are reached. 'compact' will enable log compaction, resulting in
+ retention of the latest value for each key. Known values are: "delete",
+ "compact", and "compact_delete".
+ "compression_type": "producer", # Optional. Default value is
+ "producer". The compression_type specifies the compression type of the
+ topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and
+ "uncompressed".
+ "delete_retention_ms": 86400000, # Optional. Default value
+ is 86400000. The delete_retention_ms specifies how long (in ms) to retain
+ delete tombstone markers for topics.
+ "file_delete_delay_ms": 60000, # Optional. Default value is
+ 60000. The file_delete_delay_ms specifies the time (in ms) to wait before
+ deleting a file from the filesystem.
+ "flush_messages": 9223372036854776000, # Optional. Default
+ value is 9223372036854776000. The flush_messages specifies the number of
+ messages to accumulate on a log partition before messages are flushed to
+ disk.
+ "flush_ms": 9223372036854776000, # Optional. Default value
+ is 9223372036854776000. The flush_ms specifies the maximum time (in ms)
+ that a message is kept in memory before being flushed to disk.
+ "index_interval_bytes": 4096, # Optional. Default value is
+ 4096. The index_interval_bytes specifies the number of bytes between
+                     entries being added into the offset index.
+ "max_compaction_lag_ms": 9223372036854776000, # Optional.
+ Default value is 9223372036854776000. The max_compaction_lag_ms specifies
+ the maximum amount of time (in ms) that a message will remain
+                     uncompacted. This is only applicable if the logs have compaction
+ enabled.
+ "max_message_bytes": 1048588, # Optional. Default value is
+                     1048588. The max_message_bytes specifies the largest record batch size
+ (in bytes) that can be sent to the server. This is calculated after
+ compression if compression is enabled.
+ "message_down_conversion_enable": True, # Optional. Default
+ value is True. The message_down_conversion_enable specifies whether
+ down-conversion of message formats is enabled to satisfy consumer
+ requests. When 'false', the broker will not perform conversion for
+ consumers expecting older message formats. The broker will respond with
+ an ``UNSUPPORTED_VERSION`` error for consume requests from these older
+ clients.
+ "message_format_version": "3.0-IV1", # Optional. Default
+ value is "3.0-IV1". The message_format_version specifies the message
+ format version used by the broker to append messages to the logs. The
+ value of this setting is assumed to be 3.0-IV1 if the broker protocol
+ version is 3.0 or higher. By setting a particular message format
+ version, all existing messages on disk must be smaller or equal to the
+ specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0",
+ "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2",
+ "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0",
+ "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2",
+ "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1",
+ "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0",
+ "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0",
+ "3.3-IV1", "3.3-IV2", and "3.3-IV3".
+ "message_timestamp_type": "create_time", # Optional. Default
+ value is "create_time". The message_timestamp_type specifies whether to
+ use the message create time or log append time as the timestamp on a
+ message. Known values are: "create_time" and "log_append_time".
+ "min_cleanable_dirty_ratio": 0.5, # Optional. Default value
+ is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log
+ compaction (if enabled) in relation to duplicates present in the logs.
+ For example, at 0.5, at most 50% of the log could be duplicates before
+ compaction would begin.
+ "min_compaction_lag_ms": 0, # Optional. Default value is 0.
+ The min_compaction_lag_ms specifies the minimum time (in ms) that a
+ message will remain uncompacted in the log. Only relevant if log
+ compaction is enabled.
+ "min_insync_replicas": 1, # Optional. Default value is 1.
+ The min_insync_replicas specifies the number of replicas that must ACK a
+ write for the write to be considered successful.
+ "preallocate": False, # Optional. Default value is False.
+ The preallocate specifies whether a file should be preallocated on disk
+ when creating a new log segment.
+ "retention_bytes": -1, # Optional. Default value is -1. The
+ retention_bytes specifies the maximum size of the log (in bytes) before
+ deleting messages. -1 indicates that there is no limit.
+ "retention_ms": 604800000, # Optional. Default value is
+ 604800000. The retention_ms specifies the maximum amount of time (in ms)
+ to keep a message before deleting it.
+ "segment_bytes": 209715200, # Optional. Default value is
+ 209715200. The segment_bytes specifies the maximum size of a single log
+ file (in bytes).
+ "segment_jitter_ms": 0, # Optional. Default value is 0. The
+ segment_jitter_ms specifies the maximum random jitter subtracted from the
+ scheduled segment roll time to avoid thundering herds of segment rolling.
+ "segment_ms": 604800000 # Optional. Default value is
+ 604800000. The segment_ms specifies the period of time after which the
+ log will be forced to roll if the segment file isn't full. This ensures
+ that retention can delete or compact old data.
+ },
+ "name": "str", # Optional. The name of the Kafka topic.
+ "partitions": [
+ {
+ "consumer_groups": [
+ {
+ "group_name": "str", # Optional.
+ Name of the consumer group.
+ "offset": 0 # Optional. The current
+ offset of the consumer group.
+ }
+ ],
+ "earliest_offset": 0, # Optional. The earliest
+ consumer offset amongst consumer groups.
+ "id": 0, # Optional. An identifier for the
+ partition.
+ "in_sync_replicas": 0, # Optional. The number of
+ nodes that are in-sync (have the latest data) for the given
+ partition.
+ "size": 0 # Optional. Size of the topic partition in
+ bytes.
+ }
+ ],
+ "replication_factor": 0, # Optional. The number of nodes to
+ replicate data across the cluster.
+ "state": "str" # Optional. The state of the Kafka topic. Known
+ values are: "active", "configuring", "deleting", and "unknown".
+ }
+ }
# response body for status code(s): 404
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
@@ -115457,7 +123093,7 @@ async def update_connection_pool(
content_type: Optional[str] = kwargs.pop(
"content_type", _headers.pop("Content-Type", None)
)
- cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None)
+ cls: ClsType[JSON] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -115465,125 +123101,16 @@ async def update_connection_pool(
if isinstance(body, (IOBase, bytes)):
_content = body
else:
- _json = body
+ if body is not None:
+ _json = body
+ else:
+ _json = None
- _request = build_databases_update_connection_pool_request(
+ _request = build_databases_create_kafka_topic_request(
database_cluster_uuid=database_cluster_uuid,
- pool_name=pool_name,
content_type=content_type,
json=_json,
- content=_content,
- headers=_headers,
- params=_params,
- )
- _request.url = self._client.format_url(_request.url)
-
- _stream = False
- pipeline_response: PipelineResponse = (
- await self._client._pipeline.run( # pylint: disable=protected-access
- _request, stream=_stream, **kwargs
- )
- )
-
- response = pipeline_response.http_response
-
- if response.status_code not in [204, 404]:
- if _stream:
- await response.read() # Load the body in memory and close the socket
- map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
- raise HttpResponseError(response=response)
-
- deserialized = None
- response_headers = {}
- if response.status_code == 204:
- response_headers["ratelimit-limit"] = self._deserialize(
- "int", response.headers.get("ratelimit-limit")
- )
- response_headers["ratelimit-remaining"] = self._deserialize(
- "int", response.headers.get("ratelimit-remaining")
- )
- response_headers["ratelimit-reset"] = self._deserialize(
- "int", response.headers.get("ratelimit-reset")
- )
-
- if response.status_code == 404:
- response_headers["ratelimit-limit"] = self._deserialize(
- "int", response.headers.get("ratelimit-limit")
- )
- response_headers["ratelimit-remaining"] = self._deserialize(
- "int", response.headers.get("ratelimit-remaining")
- )
- response_headers["ratelimit-reset"] = self._deserialize(
- "int", response.headers.get("ratelimit-reset")
- )
-
- if response.content:
- deserialized = response.json()
- else:
- deserialized = None
-
- if cls:
- return cls(pipeline_response, deserialized, response_headers) # type: ignore
-
- return deserialized # type: ignore
-
- @distributed_trace_async
- async def delete_connection_pool(
- self, database_cluster_uuid: str, pool_name: str, **kwargs: Any
- ) -> Optional[JSON]:
- # pylint: disable=line-too-long
- """Delete a Connection Pool (PostgreSQL).
-
- To delete a specific connection pool for a PostgreSQL database cluster, send
- a DELETE request to ``/v2/databases/$DATABASE_ID/pools/$POOL_NAME``.
-
- A status of 204 will be given. This indicates that the request was processed
- successfully, but that no response body is needed.
-
- :param database_cluster_uuid: A unique identifier for a database cluster. Required.
- :type database_cluster_uuid: str
- :param pool_name: The name used to identify the connection pool. Required.
- :type pool_name: str
- :return: JSON object or None
- :rtype: JSON or None
- :raises ~azure.core.exceptions.HttpResponseError:
-
- Example:
- .. code-block:: python
-
- # response body for status code(s): 404
- response == {
- "id": "str", # A short identifier corresponding to the HTTP status code
- returned. For example, the ID for a response returning a 404 status code would
- be "not_found.". Required.
- "message": "str", # A message providing additional information about the
- error, including details to help resolve it when possible. Required.
- "request_id": "str" # Optional. Optionally, some endpoints may include a
- request ID that should be provided when reporting bugs or opening support
- tickets to help identify the issue.
- }
- """
- error_map: MutableMapping[int, Type[HttpResponseError]] = {
- 404: ResourceNotFoundError,
- 409: ResourceExistsError,
- 304: ResourceNotModifiedError,
- 401: cast(
- Type[HttpResponseError],
- lambda response: ClientAuthenticationError(response=response),
- ),
- 429: HttpResponseError,
- 500: HttpResponseError,
- }
- error_map.update(kwargs.pop("error_map", {}) or {})
-
- _headers = kwargs.pop("headers", {}) or {}
- _params = kwargs.pop("params", {}) or {}
-
- cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None)
-
- _request = build_databases_delete_connection_pool_request(
- database_cluster_uuid=database_cluster_uuid,
- pool_name=pool_name,
+ content=_content,
headers=_headers,
params=_params,
)
@@ -115598,15 +123125,14 @@ async def delete_connection_pool(
response = pipeline_response.http_response
- if response.status_code not in [204, 404]:
+ if response.status_code not in [201, 404]:
if _stream:
await response.read() # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
raise HttpResponseError(response=response)
- deserialized = None
response_headers = {}
- if response.status_code == 204:
+ if response.status_code == 201:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
)
@@ -115617,6 +123143,11 @@ async def delete_connection_pool(
"int", response.headers.get("ratelimit-reset")
)
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
if response.status_code == 404:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
@@ -115634,24 +123165,26 @@ async def delete_connection_pool(
deserialized = None
if cls:
- return cls(pipeline_response, deserialized, response_headers) # type: ignore
+ return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
- return deserialized # type: ignore
+ return cast(JSON, deserialized) # type: ignore
@distributed_trace_async
- async def get_eviction_policy(
- self, database_cluster_uuid: str, **kwargs: Any
+ async def get_kafka_topic(
+ self, database_cluster_uuid: str, topic_name: str, **kwargs: Any
) -> JSON:
# pylint: disable=line-too-long
- """Retrieve the Eviction Policy for a Caching or Valkey Cluster.
+ """Get Topic for a Kafka Cluster.
- To retrieve the configured eviction policy for an existing Caching or Valkey cluster, send a
- GET request to ``/v2/databases/$DATABASE_ID/eviction_policy``.
- The response will be a JSON object with an ``eviction_policy`` key. This will be set to a
- string representing the eviction policy.
+ To retrieve a given topic by name from the set of a Kafka cluster's topics,
+ send a GET request to ``/v2/databases/$DATABASE_ID/topics/$TOPIC_NAME``.
+
+ The result will be a JSON object with a ``topic`` key.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
+ :param topic_name: The name used to identify the Kafka topic. Required.
+ :type topic_name: str
:return: JSON object
:rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
@@ -115661,16 +123194,127 @@ async def get_eviction_policy(
# response body for status code(s): 200
response == {
- "eviction_policy": "str" # A string specifying the desired eviction policy
- for a Caching or Valkey cluster. * ``noeviction``"" : Don't evict any data,
- returns error when memory limit is reached. * ``allkeys_lru:`` Evict any key,
- least recently used (LRU) first. * ``allkeys_random``"" : Evict keys in a random
- order. * ``volatile_lru``"" : Evict keys with expiration only, least recently
- used (LRU) first. * ``volatile_random``"" : Evict keys with expiration only in a
- random order. * ``volatile_ttl``"" : Evict keys with expiration only, shortest
- time-to-live (TTL) first. Required. Known values are: "noeviction",
- "allkeys_lru", "allkeys_random", "volatile_lru", "volatile_random", and
- "volatile_ttl".
+ "topic": {
+ "config": {
+ "cleanup_policy": "delete", # Optional. Default value is
+ "delete". The cleanup_policy sets the retention policy to use on log
+ segments. 'delete' will discard old segments when retention time/size
+ limits are reached. 'compact' will enable log compaction, resulting in
+ retention of the latest value for each key. Known values are: "delete",
+ "compact", and "compact_delete".
+ "compression_type": "producer", # Optional. Default value is
+ "producer". The compression_type specifies the compression type of the
+ topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and
+ "uncompressed".
+ "delete_retention_ms": 86400000, # Optional. Default value
+ is 86400000. The delete_retention_ms specifies how long (in ms) to retain
+ delete tombstone markers for topics.
+ "file_delete_delay_ms": 60000, # Optional. Default value is
+ 60000. The file_delete_delay_ms specifies the time (in ms) to wait before
+ deleting a file from the filesystem.
+ "flush_messages": 9223372036854776000, # Optional. Default
+ value is 9223372036854776000. The flush_messages specifies the number of
+ messages to accumulate on a log partition before messages are flushed to
+ disk.
+ "flush_ms": 9223372036854776000, # Optional. Default value
+ is 9223372036854776000. The flush_ms specifies the maximum time (in ms)
+ that a message is kept in memory before being flushed to disk.
+ "index_interval_bytes": 4096, # Optional. Default value is
+ 4096. The index_interval_bytes specifies the number of bytes between
+                     entries being added into the offset index.
+ "max_compaction_lag_ms": 9223372036854776000, # Optional.
+ Default value is 9223372036854776000. The max_compaction_lag_ms specifies
+ the maximum amount of time (in ms) that a message will remain
+                     uncompacted. This is only applicable if the logs have compaction
+ enabled.
+ "max_message_bytes": 1048588, # Optional. Default value is
+                     1048588. The max_message_bytes specifies the largest record batch size
+ (in bytes) that can be sent to the server. This is calculated after
+ compression if compression is enabled.
+ "message_down_conversion_enable": True, # Optional. Default
+ value is True. The message_down_conversion_enable specifies whether
+ down-conversion of message formats is enabled to satisfy consumer
+ requests. When 'false', the broker will not perform conversion for
+ consumers expecting older message formats. The broker will respond with
+ an ``UNSUPPORTED_VERSION`` error for consume requests from these older
+ clients.
+ "message_format_version": "3.0-IV1", # Optional. Default
+ value is "3.0-IV1". The message_format_version specifies the message
+ format version used by the broker to append messages to the logs. The
+ value of this setting is assumed to be 3.0-IV1 if the broker protocol
+ version is 3.0 or higher. By setting a particular message format
+ version, all existing messages on disk must be smaller or equal to the
+ specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0",
+ "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2",
+ "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0",
+ "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2",
+ "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1",
+ "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0",
+ "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0",
+ "3.3-IV1", "3.3-IV2", and "3.3-IV3".
+ "message_timestamp_type": "create_time", # Optional. Default
+ value is "create_time". The message_timestamp_type specifies whether to
+ use the message create time or log append time as the timestamp on a
+ message. Known values are: "create_time" and "log_append_time".
+ "min_cleanable_dirty_ratio": 0.5, # Optional. Default value
+ is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log
+ compaction (if enabled) in relation to duplicates present in the logs.
+ For example, at 0.5, at most 50% of the log could be duplicates before
+ compaction would begin.
+ "min_compaction_lag_ms": 0, # Optional. Default value is 0.
+ The min_compaction_lag_ms specifies the minimum time (in ms) that a
+ message will remain uncompacted in the log. Only relevant if log
+ compaction is enabled.
+ "min_insync_replicas": 1, # Optional. Default value is 1.
+ The min_insync_replicas specifies the number of replicas that must ACK a
+ write for the write to be considered successful.
+ "preallocate": False, # Optional. Default value is False.
+ The preallocate specifies whether a file should be preallocated on disk
+ when creating a new log segment.
+ "retention_bytes": -1, # Optional. Default value is -1. The
+ retention_bytes specifies the maximum size of the log (in bytes) before
+ deleting messages. -1 indicates that there is no limit.
+ "retention_ms": 604800000, # Optional. Default value is
+ 604800000. The retention_ms specifies the maximum amount of time (in ms)
+ to keep a message before deleting it.
+ "segment_bytes": 209715200, # Optional. Default value is
+ 209715200. The segment_bytes specifies the maximum size of a single log
+ file (in bytes).
+ "segment_jitter_ms": 0, # Optional. Default value is 0. The
+ segment_jitter_ms specifies the maximum random jitter subtracted from the
+ scheduled segment roll time to avoid thundering herds of segment rolling.
+ "segment_ms": 604800000 # Optional. Default value is
+ 604800000. The segment_ms specifies the period of time after which the
+ log will be forced to roll if the segment file isn't full. This ensures
+ that retention can delete or compact old data.
+ },
+ "name": "str", # Optional. The name of the Kafka topic.
+ "partitions": [
+ {
+ "consumer_groups": [
+ {
+ "group_name": "str", # Optional.
+ Name of the consumer group.
+ "offset": 0 # Optional. The current
+ offset of the consumer group.
+ }
+ ],
+ "earliest_offset": 0, # Optional. The earliest
+ consumer offset amongst consumer groups.
+ "id": 0, # Optional. An identifier for the
+ partition.
+ "in_sync_replicas": 0, # Optional. The number of
+ nodes that are in-sync (have the latest data) for the given
+ partition.
+ "size": 0 # Optional. Size of the topic partition in
+ bytes.
+ }
+ ],
+ "replication_factor": 0, # Optional. The number of nodes to
+ replicate data across the cluster.
+ "state": "str" # Optional. The state of the Kafka topic. Known
+ values are: "active", "configuring", "deleting", and "unknown".
+ }
}
# response body for status code(s): 404
response == {
@@ -115702,8 +123346,9 @@ async def get_eviction_policy(
cls: ClsType[JSON] = kwargs.pop("cls", None)
- _request = build_databases_get_eviction_policy_request(
+ _request = build_databases_get_kafka_topic_request(
database_cluster_uuid=database_cluster_uuid,
+ topic_name=topic_name,
headers=_headers,
params=_params,
)
@@ -115763,29 +123408,34 @@ async def get_eviction_policy(
return cast(JSON, deserialized) # type: ignore
@overload
- async def update_eviction_policy(
+ async def update_kafka_topic(
self,
database_cluster_uuid: str,
- body: JSON,
+ topic_name: str,
+ body: Optional[JSON] = None,
*,
content_type: str = "application/json",
**kwargs: Any
- ) -> Optional[JSON]:
+ ) -> JSON:
# pylint: disable=line-too-long
- """Configure the Eviction Policy for a Caching or Valkey Cluster.
+ """Update Topic for a Kafka Cluster.
- To configure an eviction policy for an existing Caching or Valkey cluster, send a PUT request
- to ``/v2/databases/$DATABASE_ID/eviction_policy`` specifying the desired policy.
+ To update a topic attached to a Kafka cluster, send a PUT request to
+ ``/v2/databases/$DATABASE_ID/topics/$TOPIC_NAME``.
+
+ The result will be a JSON object with a ``topic`` key.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
- :param body: Required.
+ :param topic_name: The name used to identify the Kafka topic. Required.
+ :type topic_name: str
+ :param body: Default value is None.
:type body: JSON
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :return: JSON object or None
- :rtype: JSON or None
+ :return: JSON object
+ :rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
Example:
@@ -115793,18 +123443,222 @@ async def update_eviction_policy(
# JSON input template you can fill out and use as your body input.
body = {
- "eviction_policy": "str" # A string specifying the desired eviction policy
- for a Caching or Valkey cluster. * ``noeviction``"" : Don't evict any data,
- returns error when memory limit is reached. * ``allkeys_lru:`` Evict any key,
- least recently used (LRU) first. * ``allkeys_random``"" : Evict keys in a random
- order. * ``volatile_lru``"" : Evict keys with expiration only, least recently
- used (LRU) first. * ``volatile_random``"" : Evict keys with expiration only in a
- random order. * ``volatile_ttl``"" : Evict keys with expiration only, shortest
- time-to-live (TTL) first. Required. Known values are: "noeviction",
- "allkeys_lru", "allkeys_random", "volatile_lru", "volatile_random", and
- "volatile_ttl".
+ "config": {
+ "cleanup_policy": "delete", # Optional. Default value is "delete".
+ The cleanup_policy sets the retention policy to use on log segments. 'delete'
+ will discard old segments when retention time/size limits are reached.
+ 'compact' will enable log compaction, resulting in retention of the latest
+ value for each key. Known values are: "delete", "compact", and
+ "compact_delete".
+ "compression_type": "producer", # Optional. Default value is
+ "producer". The compression_type specifies the compression type of the topic.
+ Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and
+ "uncompressed".
+ "delete_retention_ms": 86400000, # Optional. Default value is
+ 86400000. The delete_retention_ms specifies how long (in ms) to retain delete
+ tombstone markers for topics.
+ "file_delete_delay_ms": 60000, # Optional. Default value is 60000.
+ The file_delete_delay_ms specifies the time (in ms) to wait before deleting a
+ file from the filesystem.
+ "flush_messages": 9223372036854776000, # Optional. Default value is
+ 9223372036854776000. The flush_messages specifies the number of messages to
+ accumulate on a log partition before messages are flushed to disk.
+ "flush_ms": 9223372036854776000, # Optional. Default value is
+ 9223372036854776000. The flush_ms specifies the maximum time (in ms) that a
+ message is kept in memory before being flushed to disk.
+ "index_interval_bytes": 4096, # Optional. Default value is 4096. The
+ index_interval_bytes specifies the number of bytes between entries being
+            added into the offset index.
+ "max_compaction_lag_ms": 9223372036854776000, # Optional. Default
+ value is 9223372036854776000. The max_compaction_lag_ms specifies the maximum
+ amount of time (in ms) that a message will remain uncompacted. This is only
+            applicable if the logs have compaction enabled.
+ "max_message_bytes": 1048588, # Optional. Default value is 1048588.
+            The max_message_bytes specifies the largest record batch size (in bytes)
+ that can be sent to the server. This is calculated after compression if
+ compression is enabled.
+ "message_down_conversion_enable": True, # Optional. Default value is
+ True. The message_down_conversion_enable specifies whether down-conversion of
+ message formats is enabled to satisfy consumer requests. When 'false', the
+ broker will not perform conversion for consumers expecting older message
+ formats. The broker will respond with an ``UNSUPPORTED_VERSION`` error for
+ consume requests from these older clients.
+ "message_format_version": "3.0-IV1", # Optional. Default value is
+ "3.0-IV1". The message_format_version specifies the message format version
+ used by the broker to append messages to the logs. The value of this setting
+ is assumed to be 3.0-IV1 if the broker protocol version is 3.0 or higher. By
+ setting a particular message format version, all existing messages on disk
+ must be smaller or equal to the specified version. Known values are: "0.8.0",
+ "0.8.1", "0.8.2", "0.9.0", "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0",
+ "0.10.1-IV1", "0.10.1-IV2", "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1",
+ "0.11.0-IV2", "1.0-IV0", "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0",
+ "2.1-IV1", "2.1-IV2", "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0",
+ "2.4-IV1", "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0",
+ "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", "3.3-IV1",
+ "3.3-IV2", and "3.3-IV3".
+ "message_timestamp_type": "create_time", # Optional. Default value
+ is "create_time". The message_timestamp_type specifies whether to use the
+ message create time or log append time as the timestamp on a message. Known
+ values are: "create_time" and "log_append_time".
+ "min_cleanable_dirty_ratio": 0.5, # Optional. Default value is 0.5.
+ The min_cleanable_dirty_ratio specifies the frequency of log compaction (if
+ enabled) in relation to duplicates present in the logs. For example, at 0.5,
+ at most 50% of the log could be duplicates before compaction would begin.
+ "min_compaction_lag_ms": 0, # Optional. Default value is 0. The
+ min_compaction_lag_ms specifies the minimum time (in ms) that a message will
+ remain uncompacted in the log. Only relevant if log compaction is enabled.
+ "min_insync_replicas": 1, # Optional. Default value is 1. The
+ min_insync_replicas specifies the number of replicas that must ACK a write
+ for the write to be considered successful.
+ "preallocate": False, # Optional. Default value is False. The
+ preallocate specifies whether a file should be preallocated on disk when
+ creating a new log segment.
+ "retention_bytes": -1, # Optional. Default value is -1. The
+ retention_bytes specifies the maximum size of the log (in bytes) before
+ deleting messages. -1 indicates that there is no limit.
+ "retention_ms": 604800000, # Optional. Default value is 604800000.
+ The retention_ms specifies the maximum amount of time (in ms) to keep a
+ message before deleting it.
+ "segment_bytes": 209715200, # Optional. Default value is 209715200.
+ The segment_bytes specifies the maximum size of a single log file (in bytes).
+ "segment_jitter_ms": 0, # Optional. Default value is 0. The
+ segment_jitter_ms specifies the maximum random jitter subtracted from the
+ scheduled segment roll time to avoid thundering herds of segment rolling.
+ "segment_ms": 604800000 # Optional. Default value is 604800000. The
+ segment_ms specifies the period of time after which the log will be forced to
+ roll if the segment file isn't full. This ensures that retention can delete
+ or compact old data.
+ },
+ "partition_count": 0, # Optional. The number of partitions available for the
+ topic. On update, this value can only be increased.
+ "replication_factor": 0 # Optional. The number of nodes to replicate data
+ across the cluster.
}
+ # response body for status code(s): 200
+ response == {
+ "topic": {
+ "config": {
+ "cleanup_policy": "delete", # Optional. Default value is
+ "delete". The cleanup_policy sets the retention policy to use on log
+ segments. 'delete' will discard old segments when retention time/size
+ limits are reached. 'compact' will enable log compaction, resulting in
+ retention of the latest value for each key. Known values are: "delete",
+ "compact", and "compact_delete".
+ "compression_type": "producer", # Optional. Default value is
+ "producer". The compression_type specifies the compression type of the
+ topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and
+ "uncompressed".
+ "delete_retention_ms": 86400000, # Optional. Default value
+ is 86400000. The delete_retention_ms specifies how long (in ms) to retain
+ delete tombstone markers for topics.
+ "file_delete_delay_ms": 60000, # Optional. Default value is
+ 60000. The file_delete_delay_ms specifies the time (in ms) to wait before
+ deleting a file from the filesystem.
+ "flush_messages": 9223372036854776000, # Optional. Default
+ value is 9223372036854776000. The flush_messages specifies the number of
+ messages to accumulate on a log partition before messages are flushed to
+ disk.
+ "flush_ms": 9223372036854776000, # Optional. Default value
+ is 9223372036854776000. The flush_ms specifies the maximum time (in ms)
+ that a message is kept in memory before being flushed to disk.
+ "index_interval_bytes": 4096, # Optional. Default value is
+ 4096. The index_interval_bytes specifies the number of bytes between
+                        entries being added into the offset index.
+ "max_compaction_lag_ms": 9223372036854776000, # Optional.
+ Default value is 9223372036854776000. The max_compaction_lag_ms specifies
+ the maximum amount of time (in ms) that a message will remain
+                        uncompacted. This is only applicable if the logs have compaction
+ enabled.
+ "max_message_bytes": 1048588, # Optional. Default value is
+                        1048588. The max_message_bytes specifies the largest record batch size
+ (in bytes) that can be sent to the server. This is calculated after
+ compression if compression is enabled.
+ "message_down_conversion_enable": True, # Optional. Default
+ value is True. The message_down_conversion_enable specifies whether
+ down-conversion of message formats is enabled to satisfy consumer
+ requests. When 'false', the broker will not perform conversion for
+ consumers expecting older message formats. The broker will respond with
+ an ``UNSUPPORTED_VERSION`` error for consume requests from these older
+ clients.
+ "message_format_version": "3.0-IV1", # Optional. Default
+ value is "3.0-IV1". The message_format_version specifies the message
+ format version used by the broker to append messages to the logs. The
+ value of this setting is assumed to be 3.0-IV1 if the broker protocol
+ version is 3.0 or higher. By setting a particular message format
+ version, all existing messages on disk must be smaller or equal to the
+ specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0",
+ "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2",
+ "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0",
+ "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2",
+ "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1",
+ "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0",
+ "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0",
+ "3.3-IV1", "3.3-IV2", and "3.3-IV3".
+ "message_timestamp_type": "create_time", # Optional. Default
+ value is "create_time". The message_timestamp_type specifies whether to
+ use the message create time or log append time as the timestamp on a
+ message. Known values are: "create_time" and "log_append_time".
+ "min_cleanable_dirty_ratio": 0.5, # Optional. Default value
+ is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log
+ compaction (if enabled) in relation to duplicates present in the logs.
+ For example, at 0.5, at most 50% of the log could be duplicates before
+ compaction would begin.
+ "min_compaction_lag_ms": 0, # Optional. Default value is 0.
+ The min_compaction_lag_ms specifies the minimum time (in ms) that a
+ message will remain uncompacted in the log. Only relevant if log
+ compaction is enabled.
+ "min_insync_replicas": 1, # Optional. Default value is 1.
+ The min_insync_replicas specifies the number of replicas that must ACK a
+ write for the write to be considered successful.
+ "preallocate": False, # Optional. Default value is False.
+ The preallocate specifies whether a file should be preallocated on disk
+ when creating a new log segment.
+ "retention_bytes": -1, # Optional. Default value is -1. The
+ retention_bytes specifies the maximum size of the log (in bytes) before
+ deleting messages. -1 indicates that there is no limit.
+ "retention_ms": 604800000, # Optional. Default value is
+ 604800000. The retention_ms specifies the maximum amount of time (in ms)
+ to keep a message before deleting it.
+ "segment_bytes": 209715200, # Optional. Default value is
+ 209715200. The segment_bytes specifies the maximum size of a single log
+ file (in bytes).
+ "segment_jitter_ms": 0, # Optional. Default value is 0. The
+ segment_jitter_ms specifies the maximum random jitter subtracted from the
+ scheduled segment roll time to avoid thundering herds of segment rolling.
+ "segment_ms": 604800000 # Optional. Default value is
+ 604800000. The segment_ms specifies the period of time after which the
+ log will be forced to roll if the segment file isn't full. This ensures
+ that retention can delete or compact old data.
+ },
+ "name": "str", # Optional. The name of the Kafka topic.
+ "partitions": [
+ {
+ "consumer_groups": [
+ {
+ "group_name": "str", # Optional.
+ Name of the consumer group.
+ "offset": 0 # Optional. The current
+ offset of the consumer group.
+ }
+ ],
+ "earliest_offset": 0, # Optional. The earliest
+ consumer offset amongst consumer groups.
+ "id": 0, # Optional. An identifier for the
+ partition.
+ "in_sync_replicas": 0, # Optional. The number of
+ nodes that are in-sync (have the latest data) for the given
+ partition.
+ "size": 0 # Optional. Size of the topic partition in
+ bytes.
+ }
+ ],
+ "replication_factor": 0, # Optional. The number of nodes to
+ replicate data across the cluster.
+ "state": "str" # Optional. The state of the Kafka topic. Known
+ values are: "active", "configuring", "deleting", and "unknown".
+ }
+ }
# response body for status code(s): 404
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
@@ -115819,34 +123673,163 @@ async def update_eviction_policy(
"""
@overload
- async def update_eviction_policy(
+ async def update_kafka_topic(
self,
database_cluster_uuid: str,
- body: IO[bytes],
+ topic_name: str,
+ body: Optional[IO[bytes]] = None,
*,
content_type: str = "application/json",
**kwargs: Any
- ) -> Optional[JSON]:
+ ) -> JSON:
# pylint: disable=line-too-long
- """Configure the Eviction Policy for a Caching or Valkey Cluster.
+ """Update Topic for a Kafka Cluster.
- To configure an eviction policy for an existing Caching or Valkey cluster, send a PUT request
- to ``/v2/databases/$DATABASE_ID/eviction_policy`` specifying the desired policy.
+ To update a topic attached to a Kafka cluster, send a PUT request to
+ ``/v2/databases/$DATABASE_ID/topics/$TOPIC_NAME``.
+
+ The result will be a JSON object with a ``topic`` key.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
- :param body: Required.
+ :param topic_name: The name used to identify the Kafka topic. Required.
+ :type topic_name: str
+ :param body: Default value is None.
:type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :return: JSON object or None
- :rtype: JSON or None
+ :return: JSON object
+ :rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
+ # response body for status code(s): 200
+ response == {
+ "topic": {
+ "config": {
+ "cleanup_policy": "delete", # Optional. Default value is
+ "delete". The cleanup_policy sets the retention policy to use on log
+ segments. 'delete' will discard old segments when retention time/size
+ limits are reached. 'compact' will enable log compaction, resulting in
+ retention of the latest value for each key. Known values are: "delete",
+ "compact", and "compact_delete".
+ "compression_type": "producer", # Optional. Default value is
+ "producer". The compression_type specifies the compression type of the
+ topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and
+ "uncompressed".
+ "delete_retention_ms": 86400000, # Optional. Default value
+ is 86400000. The delete_retention_ms specifies how long (in ms) to retain
+ delete tombstone markers for topics.
+ "file_delete_delay_ms": 60000, # Optional. Default value is
+ 60000. The file_delete_delay_ms specifies the time (in ms) to wait before
+ deleting a file from the filesystem.
+ "flush_messages": 9223372036854776000, # Optional. Default
+ value is 9223372036854776000. The flush_messages specifies the number of
+ messages to accumulate on a log partition before messages are flushed to
+ disk.
+ "flush_ms": 9223372036854776000, # Optional. Default value
+ is 9223372036854776000. The flush_ms specifies the maximum time (in ms)
+ that a message is kept in memory before being flushed to disk.
+ "index_interval_bytes": 4096, # Optional. Default value is
+ 4096. The index_interval_bytes specifies the number of bytes between
+                        entries being added into the offset index.
+ "max_compaction_lag_ms": 9223372036854776000, # Optional.
+ Default value is 9223372036854776000. The max_compaction_lag_ms specifies
+ the maximum amount of time (in ms) that a message will remain
+                        uncompacted. This is only applicable if the logs have compaction
+ enabled.
+ "max_message_bytes": 1048588, # Optional. Default value is
+                        1048588. The max_message_bytes specifies the largest record batch size
+ (in bytes) that can be sent to the server. This is calculated after
+ compression if compression is enabled.
+ "message_down_conversion_enable": True, # Optional. Default
+ value is True. The message_down_conversion_enable specifies whether
+ down-conversion of message formats is enabled to satisfy consumer
+ requests. When 'false', the broker will not perform conversion for
+ consumers expecting older message formats. The broker will respond with
+ an ``UNSUPPORTED_VERSION`` error for consume requests from these older
+ clients.
+ "message_format_version": "3.0-IV1", # Optional. Default
+ value is "3.0-IV1". The message_format_version specifies the message
+ format version used by the broker to append messages to the logs. The
+ value of this setting is assumed to be 3.0-IV1 if the broker protocol
+ version is 3.0 or higher. By setting a particular message format
+ version, all existing messages on disk must be smaller or equal to the
+ specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0",
+ "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2",
+ "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0",
+ "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2",
+ "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1",
+ "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0",
+ "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0",
+ "3.3-IV1", "3.3-IV2", and "3.3-IV3".
+ "message_timestamp_type": "create_time", # Optional. Default
+ value is "create_time". The message_timestamp_type specifies whether to
+ use the message create time or log append time as the timestamp on a
+ message. Known values are: "create_time" and "log_append_time".
+ "min_cleanable_dirty_ratio": 0.5, # Optional. Default value
+ is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log
+ compaction (if enabled) in relation to duplicates present in the logs.
+ For example, at 0.5, at most 50% of the log could be duplicates before
+ compaction would begin.
+ "min_compaction_lag_ms": 0, # Optional. Default value is 0.
+ The min_compaction_lag_ms specifies the minimum time (in ms) that a
+ message will remain uncompacted in the log. Only relevant if log
+ compaction is enabled.
+ "min_insync_replicas": 1, # Optional. Default value is 1.
+ The min_insync_replicas specifies the number of replicas that must ACK a
+ write for the write to be considered successful.
+ "preallocate": False, # Optional. Default value is False.
+ The preallocate specifies whether a file should be preallocated on disk
+ when creating a new log segment.
+ "retention_bytes": -1, # Optional. Default value is -1. The
+ retention_bytes specifies the maximum size of the log (in bytes) before
+ deleting messages. -1 indicates that there is no limit.
+ "retention_ms": 604800000, # Optional. Default value is
+ 604800000. The retention_ms specifies the maximum amount of time (in ms)
+ to keep a message before deleting it.
+ "segment_bytes": 209715200, # Optional. Default value is
+ 209715200. The segment_bytes specifies the maximum size of a single log
+ file (in bytes).
+ "segment_jitter_ms": 0, # Optional. Default value is 0. The
+ segment_jitter_ms specifies the maximum random jitter subtracted from the
+ scheduled segment roll time to avoid thundering herds of segment rolling.
+ "segment_ms": 604800000 # Optional. Default value is
+ 604800000. The segment_ms specifies the period of time after which the
+ log will be forced to roll if the segment file isn't full. This ensures
+ that retention can delete or compact old data.
+ },
+ "name": "str", # Optional. The name of the Kafka topic.
+ "partitions": [
+ {
+ "consumer_groups": [
+ {
+ "group_name": "str", # Optional.
+ Name of the consumer group.
+ "offset": 0 # Optional. The current
+ offset of the consumer group.
+ }
+ ],
+ "earliest_offset": 0, # Optional. The earliest
+ consumer offset amongst consumer groups.
+ "id": 0, # Optional. An identifier for the
+ partition.
+ "in_sync_replicas": 0, # Optional. The number of
+ nodes that are in-sync (have the latest data) for the given
+ partition.
+ "size": 0 # Optional. Size of the topic partition in
+ bytes.
+ }
+ ],
+ "replication_factor": 0, # Optional. The number of nodes to
+ replicate data across the cluster.
+ "state": "str" # Optional. The state of the Kafka topic. Known
+ values are: "active", "configuring", "deleting", and "unknown".
+ }
+ }
# response body for status code(s): 404
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
@@ -115861,21 +123844,29 @@ async def update_eviction_policy(
"""
@distributed_trace_async
- async def update_eviction_policy(
- self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any
- ) -> Optional[JSON]:
+ async def update_kafka_topic(
+ self,
+ database_cluster_uuid: str,
+ topic_name: str,
+ body: Optional[Union[JSON, IO[bytes]]] = None,
+ **kwargs: Any
+ ) -> JSON:
# pylint: disable=line-too-long
- """Configure the Eviction Policy for a Caching or Valkey Cluster.
+ """Update Topic for a Kafka Cluster.
- To configure an eviction policy for an existing Caching or Valkey cluster, send a PUT request
- to ``/v2/databases/$DATABASE_ID/eviction_policy`` specifying the desired policy.
+ To update a topic attached to a Kafka cluster, send a PUT request to
+ ``/v2/databases/$DATABASE_ID/topics/$TOPIC_NAME``.
+
+ The result will be a JSON object with a ``topic`` key.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
- :param body: Is either a JSON type or a IO[bytes] type. Required.
+ :param topic_name: The name used to identify the Kafka topic. Required.
+ :type topic_name: str
+ :param body: Is either a JSON type or a IO[bytes] type. Default value is None.
:type body: JSON or IO[bytes]
- :return: JSON object or None
- :rtype: JSON or None
+ :return: JSON object
+ :rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
Example:
@@ -115883,18 +123874,222 @@ async def update_eviction_policy(
# JSON input template you can fill out and use as your body input.
body = {
- "eviction_policy": "str" # A string specifying the desired eviction policy
- for a Caching or Valkey cluster. * ``noeviction``"" : Don't evict any data,
- returns error when memory limit is reached. * ``allkeys_lru:`` Evict any key,
- least recently used (LRU) first. * ``allkeys_random``"" : Evict keys in a random
- order. * ``volatile_lru``"" : Evict keys with expiration only, least recently
- used (LRU) first. * ``volatile_random``"" : Evict keys with expiration only in a
- random order. * ``volatile_ttl``"" : Evict keys with expiration only, shortest
- time-to-live (TTL) first. Required. Known values are: "noeviction",
- "allkeys_lru", "allkeys_random", "volatile_lru", "volatile_random", and
- "volatile_ttl".
+ "config": {
+ "cleanup_policy": "delete", # Optional. Default value is "delete".
+ The cleanup_policy sets the retention policy to use on log segments. 'delete'
+ will discard old segments when retention time/size limits are reached.
+ 'compact' will enable log compaction, resulting in retention of the latest
+ value for each key. Known values are: "delete", "compact", and
+ "compact_delete".
+ "compression_type": "producer", # Optional. Default value is
+ "producer". The compression_type specifies the compression type of the topic.
+ Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and
+ "uncompressed".
+ "delete_retention_ms": 86400000, # Optional. Default value is
+ 86400000. The delete_retention_ms specifies how long (in ms) to retain delete
+ tombstone markers for topics.
+ "file_delete_delay_ms": 60000, # Optional. Default value is 60000.
+ The file_delete_delay_ms specifies the time (in ms) to wait before deleting a
+ file from the filesystem.
+ "flush_messages": 9223372036854776000, # Optional. Default value is
+ 9223372036854776000. The flush_messages specifies the number of messages to
+ accumulate on a log partition before messages are flushed to disk.
+ "flush_ms": 9223372036854776000, # Optional. Default value is
+ 9223372036854776000. The flush_ms specifies the maximum time (in ms) that a
+ message is kept in memory before being flushed to disk.
+ "index_interval_bytes": 4096, # Optional. Default value is 4096. The
+ index_interval_bytes specifies the number of bytes between entries being
+            added into the offset index.
+ "max_compaction_lag_ms": 9223372036854776000, # Optional. Default
+ value is 9223372036854776000. The max_compaction_lag_ms specifies the maximum
+ amount of time (in ms) that a message will remain uncompacted. This is only
+            applicable if the logs have compaction enabled.
+ "max_message_bytes": 1048588, # Optional. Default value is 1048588.
+            The max_message_bytes specifies the largest record batch size (in bytes)
+ that can be sent to the server. This is calculated after compression if
+ compression is enabled.
+ "message_down_conversion_enable": True, # Optional. Default value is
+ True. The message_down_conversion_enable specifies whether down-conversion of
+ message formats is enabled to satisfy consumer requests. When 'false', the
+ broker will not perform conversion for consumers expecting older message
+ formats. The broker will respond with an ``UNSUPPORTED_VERSION`` error for
+ consume requests from these older clients.
+ "message_format_version": "3.0-IV1", # Optional. Default value is
+ "3.0-IV1". The message_format_version specifies the message format version
+ used by the broker to append messages to the logs. The value of this setting
+ is assumed to be 3.0-IV1 if the broker protocol version is 3.0 or higher. By
+ setting a particular message format version, all existing messages on disk
+ must be smaller or equal to the specified version. Known values are: "0.8.0",
+ "0.8.1", "0.8.2", "0.9.0", "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0",
+ "0.10.1-IV1", "0.10.1-IV2", "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1",
+ "0.11.0-IV2", "1.0-IV0", "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0",
+ "2.1-IV1", "2.1-IV2", "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0",
+ "2.4-IV1", "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0",
+ "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", "3.3-IV1",
+ "3.3-IV2", and "3.3-IV3".
+ "message_timestamp_type": "create_time", # Optional. Default value
+ is "create_time". The message_timestamp_type specifies whether to use the
+ message create time or log append time as the timestamp on a message. Known
+ values are: "create_time" and "log_append_time".
+ "min_cleanable_dirty_ratio": 0.5, # Optional. Default value is 0.5.
+ The min_cleanable_dirty_ratio specifies the frequency of log compaction (if
+ enabled) in relation to duplicates present in the logs. For example, at 0.5,
+ at most 50% of the log could be duplicates before compaction would begin.
+ "min_compaction_lag_ms": 0, # Optional. Default value is 0. The
+ min_compaction_lag_ms specifies the minimum time (in ms) that a message will
+ remain uncompacted in the log. Only relevant if log compaction is enabled.
+ "min_insync_replicas": 1, # Optional. Default value is 1. The
+ min_insync_replicas specifies the number of replicas that must ACK a write
+ for the write to be considered successful.
+ "preallocate": False, # Optional. Default value is False. The
+ preallocate specifies whether a file should be preallocated on disk when
+ creating a new log segment.
+ "retention_bytes": -1, # Optional. Default value is -1. The
+ retention_bytes specifies the maximum size of the log (in bytes) before
+ deleting messages. -1 indicates that there is no limit.
+ "retention_ms": 604800000, # Optional. Default value is 604800000.
+ The retention_ms specifies the maximum amount of time (in ms) to keep a
+ message before deleting it.
+ "segment_bytes": 209715200, # Optional. Default value is 209715200.
+ The segment_bytes specifies the maximum size of a single log file (in bytes).
+ "segment_jitter_ms": 0, # Optional. Default value is 0. The
+ segment_jitter_ms specifies the maximum random jitter subtracted from the
+ scheduled segment roll time to avoid thundering herds of segment rolling.
+ "segment_ms": 604800000 # Optional. Default value is 604800000. The
+ segment_ms specifies the period of time after which the log will be forced to
+ roll if the segment file isn't full. This ensures that retention can delete
+ or compact old data.
+ },
+ "partition_count": 0, # Optional. The number of partitions available for the
+ topic. On update, this value can only be increased.
+ "replication_factor": 0 # Optional. The number of nodes to replicate data
+ across the cluster.
}
+ # response body for status code(s): 200
+ response == {
+ "topic": {
+ "config": {
+ "cleanup_policy": "delete", # Optional. Default value is
+ "delete". The cleanup_policy sets the retention policy to use on log
+ segments. 'delete' will discard old segments when retention time/size
+ limits are reached. 'compact' will enable log compaction, resulting in
+ retention of the latest value for each key. Known values are: "delete",
+ "compact", and "compact_delete".
+ "compression_type": "producer", # Optional. Default value is
+ "producer". The compression_type specifies the compression type of the
+ topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and
+ "uncompressed".
+ "delete_retention_ms": 86400000, # Optional. Default value
+ is 86400000. The delete_retention_ms specifies how long (in ms) to retain
+ delete tombstone markers for topics.
+ "file_delete_delay_ms": 60000, # Optional. Default value is
+ 60000. The file_delete_delay_ms specifies the time (in ms) to wait before
+ deleting a file from the filesystem.
+ "flush_messages": 9223372036854776000, # Optional. Default
+ value is 9223372036854776000. The flush_messages specifies the number of
+ messages to accumulate on a log partition before messages are flushed to
+ disk.
+ "flush_ms": 9223372036854776000, # Optional. Default value
+ is 9223372036854776000. The flush_ms specifies the maximum time (in ms)
+ that a message is kept in memory before being flushed to disk.
+ "index_interval_bytes": 4096, # Optional. Default value is
+ 4096. The index_interval_bytes specifies the number of bytes between
+                        entries being added into the offset index.
+ "max_compaction_lag_ms": 9223372036854776000, # Optional.
+ Default value is 9223372036854776000. The max_compaction_lag_ms specifies
+ the maximum amount of time (in ms) that a message will remain
+                        uncompacted. This is only applicable if the logs have compaction
+ enabled.
+ "max_message_bytes": 1048588, # Optional. Default value is
+                        1048588. The max_message_bytes specifies the largest record batch size
+ (in bytes) that can be sent to the server. This is calculated after
+ compression if compression is enabled.
+ "message_down_conversion_enable": True, # Optional. Default
+ value is True. The message_down_conversion_enable specifies whether
+ down-conversion of message formats is enabled to satisfy consumer
+ requests. When 'false', the broker will not perform conversion for
+ consumers expecting older message formats. The broker will respond with
+ an ``UNSUPPORTED_VERSION`` error for consume requests from these older
+ clients.
+ "message_format_version": "3.0-IV1", # Optional. Default
+ value is "3.0-IV1". The message_format_version specifies the message
+ format version used by the broker to append messages to the logs. The
+ value of this setting is assumed to be 3.0-IV1 if the broker protocol
+ version is 3.0 or higher. By setting a particular message format
+ version, all existing messages on disk must be smaller or equal to the
+ specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0",
+ "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2",
+ "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0",
+ "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2",
+ "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1",
+ "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0",
+ "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0",
+ "3.3-IV1", "3.3-IV2", and "3.3-IV3".
+ "message_timestamp_type": "create_time", # Optional. Default
+ value is "create_time". The message_timestamp_type specifies whether to
+ use the message create time or log append time as the timestamp on a
+ message. Known values are: "create_time" and "log_append_time".
+ "min_cleanable_dirty_ratio": 0.5, # Optional. Default value
+ is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log
+ compaction (if enabled) in relation to duplicates present in the logs.
+ For example, at 0.5, at most 50% of the log could be duplicates before
+ compaction would begin.
+ "min_compaction_lag_ms": 0, # Optional. Default value is 0.
+ The min_compaction_lag_ms specifies the minimum time (in ms) that a
+ message will remain uncompacted in the log. Only relevant if log
+ compaction is enabled.
+ "min_insync_replicas": 1, # Optional. Default value is 1.
+ The min_insync_replicas specifies the number of replicas that must ACK a
+ write for the write to be considered successful.
+ "preallocate": False, # Optional. Default value is False.
+ The preallocate specifies whether a file should be preallocated on disk
+ when creating a new log segment.
+ "retention_bytes": -1, # Optional. Default value is -1. The
+ retention_bytes specifies the maximum size of the log (in bytes) before
+ deleting messages. -1 indicates that there is no limit.
+ "retention_ms": 604800000, # Optional. Default value is
+ 604800000. The retention_ms specifies the maximum amount of time (in ms)
+ to keep a message before deleting it.
+ "segment_bytes": 209715200, # Optional. Default value is
+ 209715200. The segment_bytes specifies the maximum size of a single log
+ file (in bytes).
+ "segment_jitter_ms": 0, # Optional. Default value is 0. The
+ segment_jitter_ms specifies the maximum random jitter subtracted from the
+ scheduled segment roll time to avoid thundering herds of segment rolling.
+ "segment_ms": 604800000 # Optional. Default value is
+ 604800000. The segment_ms specifies the period of time after which the
+ log will be forced to roll if the segment file isn't full. This ensures
+ that retention can delete or compact old data.
+ },
+ "name": "str", # Optional. The name of the Kafka topic.
+ "partitions": [
+ {
+ "consumer_groups": [
+ {
+ "group_name": "str", # Optional.
+ Name of the consumer group.
+ "offset": 0 # Optional. The current
+ offset of the consumer group.
+ }
+ ],
+ "earliest_offset": 0, # Optional. The earliest
+ consumer offset amongst consumer groups.
+ "id": 0, # Optional. An identifier for the
+ partition.
+ "in_sync_replicas": 0, # Optional. The number of
+ nodes that are in-sync (have the latest data) for the given
+ partition.
+ "size": 0 # Optional. Size of the topic partition in
+ bytes.
+ }
+ ],
+ "replication_factor": 0, # Optional. The number of nodes to
+ replicate data across the cluster.
+ "state": "str" # Optional. The state of the Kafka topic. Known
+ values are: "active", "configuring", "deleting", and "unknown".
+ }
+ }
# response body for status code(s): 404
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
@@ -115926,7 +124121,7 @@ async def update_eviction_policy(
content_type: Optional[str] = kwargs.pop(
"content_type", _headers.pop("Content-Type", None)
)
- cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None)
+ cls: ClsType[JSON] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -115934,10 +124129,14 @@ async def update_eviction_policy(
if isinstance(body, (IOBase, bytes)):
_content = body
else:
- _json = body
+ if body is not None:
+ _json = body
+ else:
+ _json = None
- _request = build_databases_update_eviction_policy_request(
+ _request = build_databases_update_kafka_topic_request(
database_cluster_uuid=database_cluster_uuid,
+ topic_name=topic_name,
content_type=content_type,
json=_json,
content=_content,
@@ -115955,15 +124154,14 @@ async def update_eviction_policy(
response = pipeline_response.http_response
- if response.status_code not in [204, 404]:
+ if response.status_code not in [200, 404]:
if _stream:
await response.read() # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
raise HttpResponseError(response=response)
- deserialized = None
response_headers = {}
- if response.status_code == 204:
+ if response.status_code == 200:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
)
@@ -115974,6 +124172,11 @@ async def update_eviction_policy(
"int", response.headers.get("ratelimit-reset")
)
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
if response.status_code == 404:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
@@ -115991,34 +124194,34 @@ async def update_eviction_policy(
deserialized = None
if cls:
- return cls(pipeline_response, deserialized, response_headers) # type: ignore
+ return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
- return deserialized # type: ignore
+ return cast(JSON, deserialized) # type: ignore
@distributed_trace_async
- async def get_sql_mode(self, database_cluster_uuid: str, **kwargs: Any) -> JSON:
+ async def delete_kafka_topic(
+ self, database_cluster_uuid: str, topic_name: str, **kwargs: Any
+ ) -> Optional[JSON]:
# pylint: disable=line-too-long
- """Retrieve the SQL Modes for a MySQL Cluster.
+ """Delete Topic for a Kafka Cluster.
- To retrieve the configured SQL modes for an existing MySQL cluster, send a GET request to
- ``/v2/databases/$DATABASE_ID/sql_mode``.
- The response will be a JSON object with a ``sql_mode`` key. This will be set to a string
- representing the configured SQL modes.
+ To delete a single topic within a Kafka cluster, send a DELETE request
+ to ``/v2/databases/$DATABASE_ID/topics/$TOPIC_NAME``.
+
+ A status of 204 will be given. This indicates that the request was
+ processed successfully, but that no response body is needed.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
- :return: JSON object
- :rtype: JSON
+ :param topic_name: The name used to identify the Kafka topic. Required.
+ :type topic_name: str
+ :return: JSON object or None
+ :rtype: JSON or None
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
- # response body for status code(s): 200
- response == {
- "sql_mode": "str" # A string specifying the configured SQL modes for the
- MySQL cluster. Required.
- }
# response body for status code(s): 404
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
@@ -116047,10 +124250,11 @@ async def get_sql_mode(self, database_cluster_uuid: str, **kwargs: Any) -> JSON:
_headers = kwargs.pop("headers", {}) or {}
_params = kwargs.pop("params", {}) or {}
- cls: ClsType[JSON] = kwargs.pop("cls", None)
+ cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None)
- _request = build_databases_get_sql_mode_request(
+ _request = build_databases_delete_kafka_topic_request(
database_cluster_uuid=database_cluster_uuid,
+ topic_name=topic_name,
headers=_headers,
params=_params,
)
@@ -116065,14 +124269,15 @@ async def get_sql_mode(self, database_cluster_uuid: str, **kwargs: Any) -> JSON:
response = pipeline_response.http_response
- if response.status_code not in [200, 404]:
+ if response.status_code not in [204, 404]:
if _stream:
await response.read() # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
raise HttpResponseError(response=response)
+ deserialized = None
response_headers = {}
- if response.status_code == 200:
+ if response.status_code == 204:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
)
@@ -116083,11 +124288,6 @@ async def get_sql_mode(self, database_cluster_uuid: str, **kwargs: Any) -> JSON:
"int", response.headers.get("ratelimit-reset")
)
- if response.content:
- deserialized = response.json()
- else:
- deserialized = None
-
if response.status_code == 404:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
@@ -116105,136 +124305,40 @@ async def get_sql_mode(self, database_cluster_uuid: str, **kwargs: Any) -> JSON:
deserialized = None
if cls:
- return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
-
- return cast(JSON, deserialized) # type: ignore
-
- @overload
- async def update_sql_mode(
- self,
- database_cluster_uuid: str,
- body: JSON,
- *,
- content_type: str = "application/json",
- **kwargs: Any
- ) -> Optional[JSON]:
- # pylint: disable=line-too-long
- """Update SQL Mode for a Cluster.
-
- To configure the SQL modes for an existing MySQL cluster, send a PUT request to
- ``/v2/databases/$DATABASE_ID/sql_mode`` specifying the desired modes. See the official MySQL 8
- documentation for a `full list of supported SQL modes
- `_.
- A successful request will receive a 204 No Content status code with no body in response.
-
- :param database_cluster_uuid: A unique identifier for a database cluster. Required.
- :type database_cluster_uuid: str
- :param body: Required.
- :type body: JSON
- :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
- Default value is "application/json".
- :paramtype content_type: str
- :return: JSON object or None
- :rtype: JSON or None
- :raises ~azure.core.exceptions.HttpResponseError:
-
- Example:
- .. code-block:: python
-
- # JSON input template you can fill out and use as your body input.
- body = {
- "sql_mode": "str" # A string specifying the configured SQL modes for the
- MySQL cluster. Required.
- }
-
- # response body for status code(s): 404
- response == {
- "id": "str", # A short identifier corresponding to the HTTP status code
- returned. For example, the ID for a response returning a 404 status code would
- be "not_found.". Required.
- "message": "str", # A message providing additional information about the
- error, including details to help resolve it when possible. Required.
- "request_id": "str" # Optional. Optionally, some endpoints may include a
- request ID that should be provided when reporting bugs or opening support
- tickets to help identify the issue.
- }
- """
-
- @overload
- async def update_sql_mode(
- self,
- database_cluster_uuid: str,
- body: IO[bytes],
- *,
- content_type: str = "application/json",
- **kwargs: Any
- ) -> Optional[JSON]:
- # pylint: disable=line-too-long
- """Update SQL Mode for a Cluster.
-
- To configure the SQL modes for an existing MySQL cluster, send a PUT request to
- ``/v2/databases/$DATABASE_ID/sql_mode`` specifying the desired modes. See the official MySQL 8
- documentation for a `full list of supported SQL modes
- `_.
- A successful request will receive a 204 No Content status code with no body in response.
-
- :param database_cluster_uuid: A unique identifier for a database cluster. Required.
- :type database_cluster_uuid: str
- :param body: Required.
- :type body: IO[bytes]
- :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
- Default value is "application/json".
- :paramtype content_type: str
- :return: JSON object or None
- :rtype: JSON or None
- :raises ~azure.core.exceptions.HttpResponseError:
-
- Example:
- .. code-block:: python
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- # response body for status code(s): 404
- response == {
- "id": "str", # A short identifier corresponding to the HTTP status code
- returned. For example, the ID for a response returning a 404 status code would
- be "not_found.". Required.
- "message": "str", # A message providing additional information about the
- error, including details to help resolve it when possible. Required.
- "request_id": "str" # Optional. Optionally, some endpoints may include a
- request ID that should be provided when reporting bugs or opening support
- tickets to help identify the issue.
- }
- """
+ return deserialized # type: ignore
@distributed_trace_async
- async def update_sql_mode(
- self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any
- ) -> Optional[JSON]:
+ async def list_logsink(self, database_cluster_uuid: str, **kwargs: Any) -> JSON:
# pylint: disable=line-too-long
- """Update SQL Mode for a Cluster.
+ """List Logsinks for a Database Cluster.
- To configure the SQL modes for an existing MySQL cluster, send a PUT request to
- ``/v2/databases/$DATABASE_ID/sql_mode`` specifying the desired modes. See the official MySQL 8
- documentation for a `full list of supported SQL modes
- `_.
- A successful request will receive a 204 No Content status code with no body in response.
+ To list logsinks for a database cluster, send a GET request to
+ ``/v2/databases/$DATABASE_ID/logsink``.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
- :param body: Is either a JSON type or a IO[bytes] type. Required.
- :type body: JSON or IO[bytes]
- :return: JSON object or None
- :rtype: JSON or None
+ :return: JSON object
+ :rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
- # JSON input template you can fill out and use as your body input.
- body = {
- "sql_mode": "str" # A string specifying the configured SQL modes for the
- MySQL cluster. Required.
+ # response body for status code(s): 200
+ response == {
+ "sinks": [
+ {
+ "config": {},
+ "sink_id": "str", # Optional. A unique identifier for
+ Logsink.
+ "sink_name": "str", # Optional. The name of the Logsink.
+ "sink_type": "str" # Optional. Known values are: "rsyslog",
+ "elasticsearch", and "opensearch".
+ }
+ ]
}
-
# response body for status code(s): 404
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
@@ -116260,27 +124364,13 @@ async def update_sql_mode(
}
error_map.update(kwargs.pop("error_map", {}) or {})
- _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _headers = kwargs.pop("headers", {}) or {}
_params = kwargs.pop("params", {}) or {}
- content_type: Optional[str] = kwargs.pop(
- "content_type", _headers.pop("Content-Type", None)
- )
- cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None)
-
- content_type = content_type or "application/json"
- _json = None
- _content = None
- if isinstance(body, (IOBase, bytes)):
- _content = body
- else:
- _json = body
+ cls: ClsType[JSON] = kwargs.pop("cls", None)
- _request = build_databases_update_sql_mode_request(
+ _request = build_databases_list_logsink_request(
database_cluster_uuid=database_cluster_uuid,
- content_type=content_type,
- json=_json,
- content=_content,
headers=_headers,
params=_params,
)
@@ -116295,15 +124385,14 @@ async def update_sql_mode(
response = pipeline_response.http_response
- if response.status_code not in [204, 404]:
+ if response.status_code not in [200, 404]:
if _stream:
await response.read() # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
raise HttpResponseError(response=response)
- deserialized = None
response_headers = {}
- if response.status_code == 204:
+ if response.status_code == 200:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
)
@@ -116314,6 +124403,11 @@ async def update_sql_mode(
"int", response.headers.get("ratelimit-reset")
)
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
if response.status_code == 404:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
@@ -116331,25 +124425,24 @@ async def update_sql_mode(
deserialized = None
if cls:
- return cls(pipeline_response, deserialized, response_headers) # type: ignore
+ return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
- return deserialized # type: ignore
+ return cast(JSON, deserialized) # type: ignore
@overload
- async def update_major_version(
+ async def create_logsink(
self,
database_cluster_uuid: str,
body: JSON,
*,
content_type: str = "application/json",
**kwargs: Any
- ) -> Optional[JSON]:
+ ) -> JSON:
# pylint: disable=line-too-long
- """Upgrade Major Version for a Database.
+ """Create Logsink for a Database Cluster.
- To upgrade the major version of a database, send a PUT request to
- ``/v2/databases/$DATABASE_ID/upgrade``\\ , specifying the target version.
- A successful request will receive a 204 No Content status code with no body in response.
+ To create logsink for a database cluster, send a POST request to
+ ``/v2/databases/$DATABASE_ID/logsink``.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
@@ -116358,8 +124451,8 @@ async def update_major_version(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :return: JSON object or None
- :rtype: JSON or None
+ :return: JSON object
+ :rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
Example:
@@ -116367,10 +124460,26 @@ async def update_major_version(
# JSON input template you can fill out and use as your body input.
body = {
- "version": "str" # Optional. A string representing the version of the
- database engine in use for the cluster.
+ "config": {},
+ "sink_name": "str", # Optional. The name of the Logsink.
+ "sink_type": "str" # Optional. Type of logsink integration. * Use
+ ``datadog`` for Datadog integration **only with MongoDB clusters**. * For
+ non-MongoDB clusters, use ``rsyslog`` for general syslog forwarding. * Other
+ supported types include ``elasticsearch`` and ``opensearch``. More details about
+ the configuration can be found in the ``config`` property. Known values are:
+ "rsyslog", "elasticsearch", "opensearch", and "datadog".
}
+ # response body for status code(s): 201
+ response == {
+ "sink": {
+ "config": {},
+ "sink_id": "str", # Optional. A unique identifier for Logsink.
+ "sink_name": "str", # Optional. The name of the Logsink.
+ "sink_type": "str" # Optional. Known values are: "rsyslog",
+ "elasticsearch", and "opensearch".
+ }
+ }
# response body for status code(s): 404
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
@@ -116385,20 +124494,19 @@ async def update_major_version(
"""
@overload
- async def update_major_version(
+ async def create_logsink(
self,
database_cluster_uuid: str,
body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
- ) -> Optional[JSON]:
+ ) -> JSON:
# pylint: disable=line-too-long
- """Upgrade Major Version for a Database.
+ """Create Logsink for a Database Cluster.
- To upgrade the major version of a database, send a PUT request to
- ``/v2/databases/$DATABASE_ID/upgrade``\\ , specifying the target version.
- A successful request will receive a 204 No Content status code with no body in response.
+ To create logsink for a database cluster, send a POST request to
+ ``/v2/databases/$DATABASE_ID/logsink``.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
@@ -116407,13 +124515,23 @@ async def update_major_version(
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :return: JSON object or None
- :rtype: JSON or None
+ :return: JSON object
+ :rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
+ # response body for status code(s): 201
+ response == {
+ "sink": {
+ "config": {},
+ "sink_id": "str", # Optional. A unique identifier for Logsink.
+ "sink_name": "str", # Optional. The name of the Logsink.
+ "sink_type": "str" # Optional. Known values are: "rsyslog",
+ "elasticsearch", and "opensearch".
+ }
+ }
# response body for status code(s): 404
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
@@ -116428,22 +124546,21 @@ async def update_major_version(
"""
@distributed_trace_async
- async def update_major_version(
+ async def create_logsink(
self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any
- ) -> Optional[JSON]:
+ ) -> JSON:
# pylint: disable=line-too-long
- """Upgrade Major Version for a Database.
+ """Create Logsink for a Database Cluster.
- To upgrade the major version of a database, send a PUT request to
- ``/v2/databases/$DATABASE_ID/upgrade``\\ , specifying the target version.
- A successful request will receive a 204 No Content status code with no body in response.
+ To create logsink for a database cluster, send a POST request to
+ ``/v2/databases/$DATABASE_ID/logsink``.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
:param body: Is either a JSON type or a IO[bytes] type. Required.
:type body: JSON or IO[bytes]
- :return: JSON object or None
- :rtype: JSON or None
+ :return: JSON object
+ :rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
Example:
@@ -116451,10 +124568,26 @@ async def update_major_version(
# JSON input template you can fill out and use as your body input.
body = {
- "version": "str" # Optional. A string representing the version of the
- database engine in use for the cluster.
+ "config": {},
+ "sink_name": "str", # Optional. The name of the Logsink.
+ "sink_type": "str" # Optional. Type of logsink integration. * Use
+ ``datadog`` for Datadog integration **only with MongoDB clusters**. * For
+ non-MongoDB clusters, use ``rsyslog`` for general syslog forwarding. * Other
+ supported types include ``elasticsearch`` and ``opensearch``. More details about
+ the configuration can be found in the ``config`` property. Known values are:
+ "rsyslog", "elasticsearch", "opensearch", and "datadog".
}
+ # response body for status code(s): 201
+ response == {
+ "sink": {
+ "config": {},
+ "sink_id": "str", # Optional. A unique identifier for Logsink.
+ "sink_name": "str", # Optional. The name of the Logsink.
+ "sink_type": "str" # Optional. Known values are: "rsyslog",
+ "elasticsearch", and "opensearch".
+ }
+ }
# response body for status code(s): 404
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
@@ -116486,7 +124619,7 @@ async def update_major_version(
content_type: Optional[str] = kwargs.pop(
"content_type", _headers.pop("Content-Type", None)
)
- cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None)
+ cls: ClsType[JSON] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -116496,7 +124629,7 @@ async def update_major_version(
else:
_json = body
- _request = build_databases_update_major_version_request(
+ _request = build_databases_create_logsink_request(
database_cluster_uuid=database_cluster_uuid,
content_type=content_type,
json=_json,
@@ -116515,15 +124648,14 @@ async def update_major_version(
response = pipeline_response.http_response
- if response.status_code not in [204, 404]:
+ if response.status_code not in [201, 404]:
if _stream:
await response.read() # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
raise HttpResponseError(response=response)
- deserialized = None
response_headers = {}
- if response.status_code == 204:
+ if response.status_code == 201:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
)
@@ -116534,6 +124666,11 @@ async def update_major_version(
"int", response.headers.get("ratelimit-reset")
)
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
if response.status_code == 404:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
@@ -116551,21 +124688,24 @@ async def update_major_version(
deserialized = None
if cls:
- return cls(pipeline_response, deserialized, response_headers) # type: ignore
+ return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
- return deserialized # type: ignore
+ return cast(JSON, deserialized) # type: ignore
@distributed_trace_async
- async def get_autoscale(self, database_cluster_uuid: str, **kwargs: Any) -> JSON:
+ async def get_logsink(
+ self, database_cluster_uuid: str, logsink_id: str, **kwargs: Any
+ ) -> JSON:
# pylint: disable=line-too-long
- """Retrieve Autoscale Configuration for a Database Cluster.
+ """Get Logsink for a Database Cluster.
- To retrieve the autoscale configuration for an existing database cluster, send a GET request to
- ``/v2/databases/$DATABASE_ID/autoscale``.
- The response will be a JSON object with autoscaling configuration details.
+ To get a logsink for a database cluster, send a GET request to
+ ``/v2/databases/$DATABASE_ID/logsink/$LOGSINK_ID``.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
+ :param logsink_id: A unique identifier for a logsink of a database cluster. Required.
+ :type logsink_id: str
:return: JSON object
:rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
@@ -116575,17 +124715,11 @@ async def get_autoscale(self, database_cluster_uuid: str, **kwargs: Any) -> JSON
# response body for status code(s): 200
response == {
- "autoscale": {
- "storage": {
- "enabled": bool, # Whether storage autoscaling is enabled
- for the cluster. Required.
- "increment_gib": 0, # Optional. The amount of additional
- storage to add (in GiB) when autoscaling is triggered.
- "threshold_percent": 0 # Optional. The storage usage
- threshold percentage that triggers autoscaling. When storage usage
- exceeds this percentage, additional storage will be added automatically.
- }
- }
+ "config": {},
+ "sink_id": "str", # Optional. A unique identifier for Logsink.
+ "sink_name": "str", # Optional. The name of the Logsink.
+ "sink_type": "str" # Optional. Known values are: "rsyslog", "elasticsearch",
+ and "opensearch".
}
# response body for status code(s): 404
response == {
@@ -116617,8 +124751,9 @@ async def get_autoscale(self, database_cluster_uuid: str, **kwargs: Any) -> JSON
cls: ClsType[JSON] = kwargs.pop("cls", None)
- _request = build_databases_get_autoscale_request(
+ _request = build_databases_get_logsink_request(
database_cluster_uuid=database_cluster_uuid,
+ logsink_id=logsink_id,
headers=_headers,
params=_params,
)
@@ -116678,23 +124813,25 @@ async def get_autoscale(self, database_cluster_uuid: str, **kwargs: Any) -> JSON
return cast(JSON, deserialized) # type: ignore
@overload
- async def update_autoscale(
+ async def update_logsink(
self,
database_cluster_uuid: str,
+ logsink_id: str,
body: JSON,
*,
content_type: str = "application/json",
**kwargs: Any
) -> Optional[JSON]:
# pylint: disable=line-too-long
- """Configure Autoscale Settings for a Database Cluster.
+ """Update Logsink for a Database Cluster.
- To configure autoscale settings for an existing database cluster, send a PUT request to
- ``/v2/databases/$DATABASE_ID/autoscale``\\ , specifying the autoscale configuration.
- A successful request will receive a 204 No Content status code with no body in response.
+ To update a logsink for a database cluster, send a PUT request to
+ ``/v2/databases/$DATABASE_ID/logsink/$LOGSINK_ID``.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
+ :param logsink_id: A unique identifier for a logsink of a database cluster. Required.
+ :type logsink_id: str
:param body: Required.
:type body: JSON
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
@@ -116709,18 +124846,10 @@ async def update_autoscale(
# JSON input template you can fill out and use as your body input.
body = {
- "storage": {
- "enabled": bool, # Whether storage autoscaling is enabled for the
- cluster. Required.
- "increment_gib": 0, # Optional. The amount of additional storage to
- add (in GiB) when autoscaling is triggered.
- "threshold_percent": 0 # Optional. The storage usage threshold
- percentage that triggers autoscaling. When storage usage exceeds this
- percentage, additional storage will be added automatically.
- }
+ "config": {}
}
- # response body for status code(s): 404, 422
+ # response body for status code(s): 404
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
returned. For example, the ID for a response returning a 404 status code would
@@ -116734,23 +124863,25 @@ async def update_autoscale(
"""
@overload
- async def update_autoscale(
+ async def update_logsink(
self,
database_cluster_uuid: str,
+ logsink_id: str,
body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
) -> Optional[JSON]:
# pylint: disable=line-too-long
- """Configure Autoscale Settings for a Database Cluster.
+ """Update Logsink for a Database Cluster.
- To configure autoscale settings for an existing database cluster, send a PUT request to
- ``/v2/databases/$DATABASE_ID/autoscale``\\ , specifying the autoscale configuration.
- A successful request will receive a 204 No Content status code with no body in response.
+ To update a logsink for a database cluster, send a PUT request to
+ ``/v2/databases/$DATABASE_ID/logsink/$LOGSINK_ID``.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
+ :param logsink_id: A unique identifier for a logsink of a database cluster. Required.
+ :type logsink_id: str
:param body: Required.
:type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
@@ -116763,7 +124894,7 @@ async def update_autoscale(
Example:
.. code-block:: python
- # response body for status code(s): 404, 422
+ # response body for status code(s): 404
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
returned. For example, the ID for a response returning a 404 status code would
@@ -116777,18 +124908,23 @@ async def update_autoscale(
"""
@distributed_trace_async
- async def update_autoscale(
- self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any
+ async def update_logsink(
+ self,
+ database_cluster_uuid: str,
+ logsink_id: str,
+ body: Union[JSON, IO[bytes]],
+ **kwargs: Any
) -> Optional[JSON]:
# pylint: disable=line-too-long
- """Configure Autoscale Settings for a Database Cluster.
+ """Update Logsink for a Database Cluster.
- To configure autoscale settings for an existing database cluster, send a PUT request to
- ``/v2/databases/$DATABASE_ID/autoscale``\\ , specifying the autoscale configuration.
- A successful request will receive a 204 No Content status code with no body in response.
+ To update a logsink for a database cluster, send a PUT request to
+ ``/v2/databases/$DATABASE_ID/logsink/$LOGSINK_ID``.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
+ :param logsink_id: A unique identifier for a logsink of a database cluster. Required.
+ :type logsink_id: str
:param body: Is either a JSON type or a IO[bytes] type. Required.
:type body: JSON or IO[bytes]
:return: JSON object or None
@@ -116800,18 +124936,10 @@ async def update_autoscale(
# JSON input template you can fill out and use as your body input.
body = {
- "storage": {
- "enabled": bool, # Whether storage autoscaling is enabled for the
- cluster. Required.
- "increment_gib": 0, # Optional. The amount of additional storage to
- add (in GiB) when autoscaling is triggered.
- "threshold_percent": 0 # Optional. The storage usage threshold
- percentage that triggers autoscaling. When storage usage exceeds this
- percentage, additional storage will be added automatically.
- }
+ "config": {}
}
- # response body for status code(s): 404, 422
+ # response body for status code(s): 404
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
returned. For example, the ID for a response returning a 404 status code would
@@ -116852,8 +124980,9 @@ async def update_autoscale(
else:
_json = body
- _request = build_databases_update_autoscale_request(
+ _request = build_databases_update_logsink_request(
database_cluster_uuid=database_cluster_uuid,
+ logsink_id=logsink_id,
content_type=content_type,
json=_json,
content=_content,
@@ -116871,7 +125000,7 @@ async def update_autoscale(
response = pipeline_response.http_response
- if response.status_code not in [204, 404, 422]:
+ if response.status_code not in [200, 404]:
if _stream:
await response.read() # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
@@ -116879,7 +125008,7 @@ async def update_autoscale(
deserialized = None
response_headers = {}
- if response.status_code == 204:
+ if response.status_code == 200:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
)
@@ -116906,7 +125035,99 @@ async def update_autoscale(
else:
deserialized = None
- if response.status_code == 422:
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @distributed_trace_async
+ async def delete_logsink(
+ self, database_cluster_uuid: str, logsink_id: str, **kwargs: Any
+ ) -> Optional[JSON]:
+ # pylint: disable=line-too-long
+ """Delete Logsink for a Database Cluster.
+
+ To delete a logsink for a database cluster, send a DELETE request to
+ ``/v2/databases/$DATABASE_ID/logsink/$LOGSINK_ID``.
+
+ :param database_cluster_uuid: A unique identifier for a database cluster. Required.
+ :type database_cluster_uuid: str
+ :param logsink_id: A unique identifier for a logsink of a database cluster. Required.
+ :type logsink_id: str
+ :return: JSON object or None
+ :rtype: JSON or None
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # response body for status code(s): 404
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
+ """
+ error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ 401: cast(
+ Type[HttpResponseError],
+ lambda response: ClientAuthenticationError(response=response),
+ ),
+ 429: HttpResponseError,
+ 500: HttpResponseError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = kwargs.pop("params", {}) or {}
+
+ cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None)
+
+ _request = build_databases_delete_logsink_request(
+ database_cluster_uuid=database_cluster_uuid,
+ logsink_id=logsink_id,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = (
+ await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 404]:
+ if _stream:
+ await response.read() # Load the body in memory and close the socket
+ map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
+ raise HttpResponseError(response=response)
+
+ deserialized = None
+ response_headers = {}
+ if response.status_code == 200:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.status_code == 404:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
)
@@ -116928,16 +125149,14 @@ async def update_autoscale(
return deserialized # type: ignore
@distributed_trace_async
- async def list_kafka_topics(
+ async def list_kafka_schemas(
self, database_cluster_uuid: str, **kwargs: Any
) -> JSON:
# pylint: disable=line-too-long
- """List Topics for a Kafka Cluster.
-
- To list all of a Kafka cluster's topics, send a GET request to
- ``/v2/databases/$DATABASE_ID/topics``.
+ """List Schemas for Kafka Cluster.
- The result will be a JSON object with a ``topics`` key.
+ To list all schemas for a Kafka cluster, send a GET request to
+ ``/v2/databases/$DATABASE_ID/schema-registry``.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
@@ -116950,15 +125169,15 @@ async def list_kafka_topics(
# response body for status code(s): 200
response == {
- "topics": [
+ "subjects": [
{
- "name": "str", # Optional. The name of the Kafka topic.
- "partition_count": 0, # Optional. The number of partitions
- available for the topic. On update, this value can only be increased.
- "replication_factor": 0, # Optional. The number of nodes to
- replicate data across the cluster.
- "state": "str" # Optional. The state of the Kafka topic.
- Known values are: "active", "configuring", "deleting", and "unknown".
+ "schema": "str", # Optional. The schema definition in the
+ specified format.
+ "schema_id": 0, # Optional. The id for schema.
+ "schema_type": "str", # Optional. The type of the schema.
+ Known values are: "AVRO", "JSON", and "PROTOBUF".
+ "subject_name": "str" # Optional. The name of the schema
+ subject.
}
]
}
@@ -116992,7 +125211,7 @@ async def list_kafka_topics(
cls: ClsType[JSON] = kwargs.pop("cls", None)
- _request = build_databases_list_kafka_topics_request(
+ _request = build_databases_list_kafka_schemas_request(
database_cluster_uuid=database_cluster_uuid,
headers=_headers,
params=_params,
@@ -117053,680 +125272,149 @@ async def list_kafka_topics(
return cast(JSON, deserialized) # type: ignore
@overload
- async def create_kafka_topic(
+ async def create_kafka_schema(
self,
database_cluster_uuid: str,
- body: Optional[JSON] = None,
+ body: JSON,
*,
content_type: str = "application/json",
**kwargs: Any
) -> JSON:
# pylint: disable=line-too-long
- """Create Topic for a Kafka Cluster.
-
- To create a topic attached to a Kafka cluster, send a POST request to
- ``/v2/databases/$DATABASE_ID/topics``.
+ """Create Schema Registry for Kafka Cluster.
- The result will be a JSON object with a ``topic`` key.
+ To create a Kafka schema for a database cluster, send a POST request to
+ ``/v2/databases/$DATABASE_ID/schema-registry``.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
- :param body: Default value is None.
+ :param body: Required.
:type body: JSON
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:return: JSON object
:rtype: JSON
- :raises ~azure.core.exceptions.HttpResponseError:
-
- Example:
- .. code-block:: python
-
- # JSON input template you can fill out and use as your body input.
- body = {
- "config": {
- "cleanup_policy": "delete", # Optional. Default value is "delete".
- The cleanup_policy sets the retention policy to use on log segments. 'delete'
- will discard old segments when retention time/size limits are reached.
- 'compact' will enable log compaction, resulting in retention of the latest
- value for each key. Known values are: "delete", "compact", and
- "compact_delete".
- "compression_type": "producer", # Optional. Default value is
- "producer". The compression_type specifies the compression type of the topic.
- Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and
- "uncompressed".
- "delete_retention_ms": 86400000, # Optional. Default value is
- 86400000. The delete_retention_ms specifies how long (in ms) to retain delete
- tombstone markers for topics.
- "file_delete_delay_ms": 60000, # Optional. Default value is 60000.
- The file_delete_delay_ms specifies the time (in ms) to wait before deleting a
- file from the filesystem.
- "flush_messages": 9223372036854776000, # Optional. Default value is
- 9223372036854776000. The flush_messages specifies the number of messages to
- accumulate on a log partition before messages are flushed to disk.
- "flush_ms": 9223372036854776000, # Optional. Default value is
- 9223372036854776000. The flush_ms specifies the maximum time (in ms) that a
- message is kept in memory before being flushed to disk.
- "index_interval_bytes": 4096, # Optional. Default value is 4096. The
- index_interval_bytes specifies the number of bytes between entries being
- added into te offset index.
- "max_compaction_lag_ms": 9223372036854776000, # Optional. Default
- value is 9223372036854776000. The max_compaction_lag_ms specifies the maximum
- amount of time (in ms) that a message will remain uncompacted. This is only
- applicable if the logs are have compaction enabled.
- "max_message_bytes": 1048588, # Optional. Default value is 1048588.
- The max_messages_bytes specifies the largest record batch size (in bytes)
- that can be sent to the server. This is calculated after compression if
- compression is enabled.
- "message_down_conversion_enable": True, # Optional. Default value is
- True. The message_down_conversion_enable specifies whether down-conversion of
- message formats is enabled to satisfy consumer requests. When 'false', the
- broker will not perform conversion for consumers expecting older message
- formats. The broker will respond with an ``UNSUPPORTED_VERSION`` error for
- consume requests from these older clients.
- "message_format_version": "3.0-IV1", # Optional. Default value is
- "3.0-IV1". The message_format_version specifies the message format version
- used by the broker to append messages to the logs. The value of this setting
- is assumed to be 3.0-IV1 if the broker protocol version is 3.0 or higher. By
- setting a particular message format version, all existing messages on disk
- must be smaller or equal to the specified version. Known values are: "0.8.0",
- "0.8.1", "0.8.2", "0.9.0", "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0",
- "0.10.1-IV1", "0.10.1-IV2", "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1",
- "0.11.0-IV2", "1.0-IV0", "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0",
- "2.1-IV1", "2.1-IV2", "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0",
- "2.4-IV1", "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0",
- "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", "3.3-IV1",
- "3.3-IV2", and "3.3-IV3".
- "message_timestamp_type": "create_time", # Optional. Default value
- is "create_time". The message_timestamp_type specifies whether to use the
- message create time or log append time as the timestamp on a message. Known
- values are: "create_time" and "log_append_time".
- "min_cleanable_dirty_ratio": 0.5, # Optional. Default value is 0.5.
- The min_cleanable_dirty_ratio specifies the frequency of log compaction (if
- enabled) in relation to duplicates present in the logs. For example, at 0.5,
- at most 50% of the log could be duplicates before compaction would begin.
- "min_compaction_lag_ms": 0, # Optional. Default value is 0. The
- min_compaction_lag_ms specifies the minimum time (in ms) that a message will
- remain uncompacted in the log. Only relevant if log compaction is enabled.
- "min_insync_replicas": 1, # Optional. Default value is 1. The
- min_insync_replicas specifies the number of replicas that must ACK a write
- for the write to be considered successful.
- "preallocate": False, # Optional. Default value is False. The
- preallocate specifies whether a file should be preallocated on disk when
- creating a new log segment.
- "retention_bytes": -1, # Optional. Default value is -1. The
- retention_bytes specifies the maximum size of the log (in bytes) before
- deleting messages. -1 indicates that there is no limit.
- "retention_ms": 604800000, # Optional. Default value is 604800000.
- The retention_ms specifies the maximum amount of time (in ms) to keep a
- message before deleting it.
- "segment_bytes": 209715200, # Optional. Default value is 209715200.
- The segment_bytes specifies the maximum size of a single log file (in bytes).
- "segment_jitter_ms": 0, # Optional. Default value is 0. The
- segment_jitter_ms specifies the maximum random jitter subtracted from the
- scheduled segment roll time to avoid thundering herds of segment rolling.
- "segment_ms": 604800000 # Optional. Default value is 604800000. The
- segment_ms specifies the period of time after which the log will be forced to
- roll if the segment file isn't full. This ensures that retention can delete
- or compact old data.
- },
- "name": "str", # Optional. The name of the Kafka topic.
- "partition_count": 0, # Optional. The number of partitions available for the
- topic. On update, this value can only be increased.
- "replication_factor": 0 # Optional. The number of nodes to replicate data
- across the cluster.
- }
-
- # response body for status code(s): 201
- response == {
- "topic": {
- "config": {
- "cleanup_policy": "delete", # Optional. Default value is
- "delete". The cleanup_policy sets the retention policy to use on log
- segments. 'delete' will discard old segments when retention time/size
- limits are reached. 'compact' will enable log compaction, resulting in
- retention of the latest value for each key. Known values are: "delete",
- "compact", and "compact_delete".
- "compression_type": "producer", # Optional. Default value is
- "producer". The compression_type specifies the compression type of the
- topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and
- "uncompressed".
- "delete_retention_ms": 86400000, # Optional. Default value
- is 86400000. The delete_retention_ms specifies how long (in ms) to retain
- delete tombstone markers for topics.
- "file_delete_delay_ms": 60000, # Optional. Default value is
- 60000. The file_delete_delay_ms specifies the time (in ms) to wait before
- deleting a file from the filesystem.
- "flush_messages": 9223372036854776000, # Optional. Default
- value is 9223372036854776000. The flush_messages specifies the number of
- messages to accumulate on a log partition before messages are flushed to
- disk.
- "flush_ms": 9223372036854776000, # Optional. Default value
- is 9223372036854776000. The flush_ms specifies the maximum time (in ms)
- that a message is kept in memory before being flushed to disk.
- "index_interval_bytes": 4096, # Optional. Default value is
- 4096. The index_interval_bytes specifies the number of bytes between
- entries being added into te offset index.
- "max_compaction_lag_ms": 9223372036854776000, # Optional.
- Default value is 9223372036854776000. The max_compaction_lag_ms specifies
- the maximum amount of time (in ms) that a message will remain
- uncompacted. This is only applicable if the logs are have compaction
- enabled.
- "max_message_bytes": 1048588, # Optional. Default value is
- 1048588. The max_messages_bytes specifies the largest record batch size
- (in bytes) that can be sent to the server. This is calculated after
- compression if compression is enabled.
- "message_down_conversion_enable": True, # Optional. Default
- value is True. The message_down_conversion_enable specifies whether
- down-conversion of message formats is enabled to satisfy consumer
- requests. When 'false', the broker will not perform conversion for
- consumers expecting older message formats. The broker will respond with
- an ``UNSUPPORTED_VERSION`` error for consume requests from these older
- clients.
- "message_format_version": "3.0-IV1", # Optional. Default
- value is "3.0-IV1". The message_format_version specifies the message
- format version used by the broker to append messages to the logs. The
- value of this setting is assumed to be 3.0-IV1 if the broker protocol
- version is 3.0 or higher. By setting a particular message format
- version, all existing messages on disk must be smaller or equal to the
- specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0",
- "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2",
- "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0",
- "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2",
- "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1",
- "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0",
- "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0",
- "3.3-IV1", "3.3-IV2", and "3.3-IV3".
- "message_timestamp_type": "create_time", # Optional. Default
- value is "create_time". The message_timestamp_type specifies whether to
- use the message create time or log append time as the timestamp on a
- message. Known values are: "create_time" and "log_append_time".
- "min_cleanable_dirty_ratio": 0.5, # Optional. Default value
- is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log
- compaction (if enabled) in relation to duplicates present in the logs.
- For example, at 0.5, at most 50% of the log could be duplicates before
- compaction would begin.
- "min_compaction_lag_ms": 0, # Optional. Default value is 0.
- The min_compaction_lag_ms specifies the minimum time (in ms) that a
- message will remain uncompacted in the log. Only relevant if log
- compaction is enabled.
- "min_insync_replicas": 1, # Optional. Default value is 1.
- The min_insync_replicas specifies the number of replicas that must ACK a
- write for the write to be considered successful.
- "preallocate": False, # Optional. Default value is False.
- The preallocate specifies whether a file should be preallocated on disk
- when creating a new log segment.
- "retention_bytes": -1, # Optional. Default value is -1. The
- retention_bytes specifies the maximum size of the log (in bytes) before
- deleting messages. -1 indicates that there is no limit.
- "retention_ms": 604800000, # Optional. Default value is
- 604800000. The retention_ms specifies the maximum amount of time (in ms)
- to keep a message before deleting it.
- "segment_bytes": 209715200, # Optional. Default value is
- 209715200. The segment_bytes specifies the maximum size of a single log
- file (in bytes).
- "segment_jitter_ms": 0, # Optional. Default value is 0. The
- segment_jitter_ms specifies the maximum random jitter subtracted from the
- scheduled segment roll time to avoid thundering herds of segment rolling.
- "segment_ms": 604800000 # Optional. Default value is
- 604800000. The segment_ms specifies the period of time after which the
- log will be forced to roll if the segment file isn't full. This ensures
- that retention can delete or compact old data.
- },
- "name": "str", # Optional. The name of the Kafka topic.
- "partitions": [
- {
- "consumer_groups": [
- {
- "group_name": "str", # Optional.
- Name of the consumer group.
- "offset": 0 # Optional. The current
- offset of the consumer group.
- }
- ],
- "earliest_offset": 0, # Optional. The earliest
- consumer offset amongst consumer groups.
- "id": 0, # Optional. An identifier for the
- partition.
- "in_sync_replicas": 0, # Optional. The number of
- nodes that are in-sync (have the latest data) for the given
- partition.
- "size": 0 # Optional. Size of the topic partition in
- bytes.
- }
- ],
- "replication_factor": 0, # Optional. The number of nodes to
- replicate data across the cluster.
- "state": "str" # Optional. The state of the Kafka topic. Known
- values are: "active", "configuring", "deleting", and "unknown".
- }
- }
- # response body for status code(s): 404
- response == {
- "id": "str", # A short identifier corresponding to the HTTP status code
- returned. For example, the ID for a response returning a 404 status code would
- be "not_found.". Required.
- "message": "str", # A message providing additional information about the
- error, including details to help resolve it when possible. Required.
- "request_id": "str" # Optional. Optionally, some endpoints may include a
- request ID that should be provided when reporting bugs or opening support
- tickets to help identify the issue.
- }
- """
-
- @overload
- async def create_kafka_topic(
- self,
- database_cluster_uuid: str,
- body: Optional[IO[bytes]] = None,
- *,
- content_type: str = "application/json",
- **kwargs: Any
- ) -> JSON:
- # pylint: disable=line-too-long
- """Create Topic for a Kafka Cluster.
-
- To create a topic attached to a Kafka cluster, send a POST request to
- ``/v2/databases/$DATABASE_ID/topics``.
-
- The result will be a JSON object with a ``topic`` key.
-
- :param database_cluster_uuid: A unique identifier for a database cluster. Required.
- :type database_cluster_uuid: str
- :param body: Default value is None.
- :type body: IO[bytes]
- :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
- Default value is "application/json".
- :paramtype content_type: str
- :return: JSON object
- :rtype: JSON
- :raises ~azure.core.exceptions.HttpResponseError:
-
- Example:
- .. code-block:: python
-
- # response body for status code(s): 201
- response == {
- "topic": {
- "config": {
- "cleanup_policy": "delete", # Optional. Default value is
- "delete". The cleanup_policy sets the retention policy to use on log
- segments. 'delete' will discard old segments when retention time/size
- limits are reached. 'compact' will enable log compaction, resulting in
- retention of the latest value for each key. Known values are: "delete",
- "compact", and "compact_delete".
- "compression_type": "producer", # Optional. Default value is
- "producer". The compression_type specifies the compression type of the
- topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and
- "uncompressed".
- "delete_retention_ms": 86400000, # Optional. Default value
- is 86400000. The delete_retention_ms specifies how long (in ms) to retain
- delete tombstone markers for topics.
- "file_delete_delay_ms": 60000, # Optional. Default value is
- 60000. The file_delete_delay_ms specifies the time (in ms) to wait before
- deleting a file from the filesystem.
- "flush_messages": 9223372036854776000, # Optional. Default
- value is 9223372036854776000. The flush_messages specifies the number of
- messages to accumulate on a log partition before messages are flushed to
- disk.
- "flush_ms": 9223372036854776000, # Optional. Default value
- is 9223372036854776000. The flush_ms specifies the maximum time (in ms)
- that a message is kept in memory before being flushed to disk.
- "index_interval_bytes": 4096, # Optional. Default value is
- 4096. The index_interval_bytes specifies the number of bytes between
- entries being added into te offset index.
- "max_compaction_lag_ms": 9223372036854776000, # Optional.
- Default value is 9223372036854776000. The max_compaction_lag_ms specifies
- the maximum amount of time (in ms) that a message will remain
- uncompacted. This is only applicable if the logs are have compaction
- enabled.
- "max_message_bytes": 1048588, # Optional. Default value is
- 1048588. The max_messages_bytes specifies the largest record batch size
- (in bytes) that can be sent to the server. This is calculated after
- compression if compression is enabled.
- "message_down_conversion_enable": True, # Optional. Default
- value is True. The message_down_conversion_enable specifies whether
- down-conversion of message formats is enabled to satisfy consumer
- requests. When 'false', the broker will not perform conversion for
- consumers expecting older message formats. The broker will respond with
- an ``UNSUPPORTED_VERSION`` error for consume requests from these older
- clients.
- "message_format_version": "3.0-IV1", # Optional. Default
- value is "3.0-IV1". The message_format_version specifies the message
- format version used by the broker to append messages to the logs. The
- value of this setting is assumed to be 3.0-IV1 if the broker protocol
- version is 3.0 or higher. By setting a particular message format
- version, all existing messages on disk must be smaller or equal to the
- specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0",
- "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2",
- "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0",
- "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2",
- "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1",
- "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0",
- "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0",
- "3.3-IV1", "3.3-IV2", and "3.3-IV3".
- "message_timestamp_type": "create_time", # Optional. Default
- value is "create_time". The message_timestamp_type specifies whether to
- use the message create time or log append time as the timestamp on a
- message. Known values are: "create_time" and "log_append_time".
- "min_cleanable_dirty_ratio": 0.5, # Optional. Default value
- is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log
- compaction (if enabled) in relation to duplicates present in the logs.
- For example, at 0.5, at most 50% of the log could be duplicates before
- compaction would begin.
- "min_compaction_lag_ms": 0, # Optional. Default value is 0.
- The min_compaction_lag_ms specifies the minimum time (in ms) that a
- message will remain uncompacted in the log. Only relevant if log
- compaction is enabled.
- "min_insync_replicas": 1, # Optional. Default value is 1.
- The min_insync_replicas specifies the number of replicas that must ACK a
- write for the write to be considered successful.
- "preallocate": False, # Optional. Default value is False.
- The preallocate specifies whether a file should be preallocated on disk
- when creating a new log segment.
- "retention_bytes": -1, # Optional. Default value is -1. The
- retention_bytes specifies the maximum size of the log (in bytes) before
- deleting messages. -1 indicates that there is no limit.
- "retention_ms": 604800000, # Optional. Default value is
- 604800000. The retention_ms specifies the maximum amount of time (in ms)
- to keep a message before deleting it.
- "segment_bytes": 209715200, # Optional. Default value is
- 209715200. The segment_bytes specifies the maximum size of a single log
- file (in bytes).
- "segment_jitter_ms": 0, # Optional. Default value is 0. The
- segment_jitter_ms specifies the maximum random jitter subtracted from the
- scheduled segment roll time to avoid thundering herds of segment rolling.
- "segment_ms": 604800000 # Optional. Default value is
- 604800000. The segment_ms specifies the period of time after which the
- log will be forced to roll if the segment file isn't full. This ensures
- that retention can delete or compact old data.
- },
- "name": "str", # Optional. The name of the Kafka topic.
- "partitions": [
- {
- "consumer_groups": [
- {
- "group_name": "str", # Optional.
- Name of the consumer group.
- "offset": 0 # Optional. The current
- offset of the consumer group.
- }
- ],
- "earliest_offset": 0, # Optional. The earliest
- consumer offset amongst consumer groups.
- "id": 0, # Optional. An identifier for the
- partition.
- "in_sync_replicas": 0, # Optional. The number of
- nodes that are in-sync (have the latest data) for the given
- partition.
- "size": 0 # Optional. Size of the topic partition in
- bytes.
- }
- ],
- "replication_factor": 0, # Optional. The number of nodes to
- replicate data across the cluster.
- "state": "str" # Optional. The state of the Kafka topic. Known
- values are: "active", "configuring", "deleting", and "unknown".
- }
- }
- # response body for status code(s): 404
- response == {
- "id": "str", # A short identifier corresponding to the HTTP status code
- returned. For example, the ID for a response returning a 404 status code would
- be "not_found.". Required.
- "message": "str", # A message providing additional information about the
- error, including details to help resolve it when possible. Required.
- "request_id": "str" # Optional. Optionally, some endpoints may include a
- request ID that should be provided when reporting bugs or opening support
- tickets to help identify the issue.
- }
- """
-
- @distributed_trace_async
- async def create_kafka_topic(
- self,
- database_cluster_uuid: str,
- body: Optional[Union[JSON, IO[bytes]]] = None,
- **kwargs: Any
- ) -> JSON:
- # pylint: disable=line-too-long
- """Create Topic for a Kafka Cluster.
-
- To create a topic attached to a Kafka cluster, send a POST request to
- ``/v2/databases/$DATABASE_ID/topics``.
-
- The result will be a JSON object with a ``topic`` key.
-
- :param database_cluster_uuid: A unique identifier for a database cluster. Required.
- :type database_cluster_uuid: str
- :param body: Is either a JSON type or a IO[bytes] type. Default value is None.
- :type body: JSON or IO[bytes]
- :return: JSON object
- :rtype: JSON
- :raises ~azure.core.exceptions.HttpResponseError:
-
- Example:
- .. code-block:: python
-
- # JSON input template you can fill out and use as your body input.
- body = {
- "config": {
- "cleanup_policy": "delete", # Optional. Default value is "delete".
- The cleanup_policy sets the retention policy to use on log segments. 'delete'
- will discard old segments when retention time/size limits are reached.
- 'compact' will enable log compaction, resulting in retention of the latest
- value for each key. Known values are: "delete", "compact", and
- "compact_delete".
- "compression_type": "producer", # Optional. Default value is
- "producer". The compression_type specifies the compression type of the topic.
- Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and
- "uncompressed".
- "delete_retention_ms": 86400000, # Optional. Default value is
- 86400000. The delete_retention_ms specifies how long (in ms) to retain delete
- tombstone markers for topics.
- "file_delete_delay_ms": 60000, # Optional. Default value is 60000.
- The file_delete_delay_ms specifies the time (in ms) to wait before deleting a
- file from the filesystem.
- "flush_messages": 9223372036854776000, # Optional. Default value is
- 9223372036854776000. The flush_messages specifies the number of messages to
- accumulate on a log partition before messages are flushed to disk.
- "flush_ms": 9223372036854776000, # Optional. Default value is
- 9223372036854776000. The flush_ms specifies the maximum time (in ms) that a
- message is kept in memory before being flushed to disk.
- "index_interval_bytes": 4096, # Optional. Default value is 4096. The
- index_interval_bytes specifies the number of bytes between entries being
- added into te offset index.
- "max_compaction_lag_ms": 9223372036854776000, # Optional. Default
- value is 9223372036854776000. The max_compaction_lag_ms specifies the maximum
- amount of time (in ms) that a message will remain uncompacted. This is only
- applicable if the logs are have compaction enabled.
- "max_message_bytes": 1048588, # Optional. Default value is 1048588.
- The max_messages_bytes specifies the largest record batch size (in bytes)
- that can be sent to the server. This is calculated after compression if
- compression is enabled.
- "message_down_conversion_enable": True, # Optional. Default value is
- True. The message_down_conversion_enable specifies whether down-conversion of
- message formats is enabled to satisfy consumer requests. When 'false', the
- broker will not perform conversion for consumers expecting older message
- formats. The broker will respond with an ``UNSUPPORTED_VERSION`` error for
- consume requests from these older clients.
- "message_format_version": "3.0-IV1", # Optional. Default value is
- "3.0-IV1". The message_format_version specifies the message format version
- used by the broker to append messages to the logs. The value of this setting
- is assumed to be 3.0-IV1 if the broker protocol version is 3.0 or higher. By
- setting a particular message format version, all existing messages on disk
- must be smaller or equal to the specified version. Known values are: "0.8.0",
- "0.8.1", "0.8.2", "0.9.0", "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0",
- "0.10.1-IV1", "0.10.1-IV2", "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1",
- "0.11.0-IV2", "1.0-IV0", "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0",
- "2.1-IV1", "2.1-IV2", "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0",
- "2.4-IV1", "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0",
- "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", "3.3-IV1",
- "3.3-IV2", and "3.3-IV3".
- "message_timestamp_type": "create_time", # Optional. Default value
- is "create_time". The message_timestamp_type specifies whether to use the
- message create time or log append time as the timestamp on a message. Known
- values are: "create_time" and "log_append_time".
- "min_cleanable_dirty_ratio": 0.5, # Optional. Default value is 0.5.
- The min_cleanable_dirty_ratio specifies the frequency of log compaction (if
- enabled) in relation to duplicates present in the logs. For example, at 0.5,
- at most 50% of the log could be duplicates before compaction would begin.
- "min_compaction_lag_ms": 0, # Optional. Default value is 0. The
- min_compaction_lag_ms specifies the minimum time (in ms) that a message will
- remain uncompacted in the log. Only relevant if log compaction is enabled.
- "min_insync_replicas": 1, # Optional. Default value is 1. The
- min_insync_replicas specifies the number of replicas that must ACK a write
- for the write to be considered successful.
- "preallocate": False, # Optional. Default value is False. The
- preallocate specifies whether a file should be preallocated on disk when
- creating a new log segment.
- "retention_bytes": -1, # Optional. Default value is -1. The
- retention_bytes specifies the maximum size of the log (in bytes) before
- deleting messages. -1 indicates that there is no limit.
- "retention_ms": 604800000, # Optional. Default value is 604800000.
- The retention_ms specifies the maximum amount of time (in ms) to keep a
- message before deleting it.
- "segment_bytes": 209715200, # Optional. Default value is 209715200.
- The segment_bytes specifies the maximum size of a single log file (in bytes).
- "segment_jitter_ms": 0, # Optional. Default value is 0. The
- segment_jitter_ms specifies the maximum random jitter subtracted from the
- scheduled segment roll time to avoid thundering herds of segment rolling.
- "segment_ms": 604800000 # Optional. Default value is 604800000. The
- segment_ms specifies the period of time after which the log will be forced to
- roll if the segment file isn't full. This ensures that retention can delete
- or compact old data.
- },
- "name": "str", # Optional. The name of the Kafka topic.
- "partition_count": 0, # Optional. The number of partitions available for the
- topic. On update, this value can only be increased.
- "replication_factor": 0 # Optional. The number of nodes to replicate data
- across the cluster.
- }
-
- # response body for status code(s): 201
- response == {
- "topic": {
- "config": {
- "cleanup_policy": "delete", # Optional. Default value is
- "delete". The cleanup_policy sets the retention policy to use on log
- segments. 'delete' will discard old segments when retention time/size
- limits are reached. 'compact' will enable log compaction, resulting in
- retention of the latest value for each key. Known values are: "delete",
- "compact", and "compact_delete".
- "compression_type": "producer", # Optional. Default value is
- "producer". The compression_type specifies the compression type of the
- topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and
- "uncompressed".
- "delete_retention_ms": 86400000, # Optional. Default value
- is 86400000. The delete_retention_ms specifies how long (in ms) to retain
- delete tombstone markers for topics.
- "file_delete_delay_ms": 60000, # Optional. Default value is
- 60000. The file_delete_delay_ms specifies the time (in ms) to wait before
- deleting a file from the filesystem.
- "flush_messages": 9223372036854776000, # Optional. Default
- value is 9223372036854776000. The flush_messages specifies the number of
- messages to accumulate on a log partition before messages are flushed to
- disk.
- "flush_ms": 9223372036854776000, # Optional. Default value
- is 9223372036854776000. The flush_ms specifies the maximum time (in ms)
- that a message is kept in memory before being flushed to disk.
- "index_interval_bytes": 4096, # Optional. Default value is
- 4096. The index_interval_bytes specifies the number of bytes between
- entries being added into te offset index.
- "max_compaction_lag_ms": 9223372036854776000, # Optional.
- Default value is 9223372036854776000. The max_compaction_lag_ms specifies
- the maximum amount of time (in ms) that a message will remain
- uncompacted. This is only applicable if the logs are have compaction
- enabled.
- "max_message_bytes": 1048588, # Optional. Default value is
- 1048588. The max_messages_bytes specifies the largest record batch size
- (in bytes) that can be sent to the server. This is calculated after
- compression if compression is enabled.
- "message_down_conversion_enable": True, # Optional. Default
- value is True. The message_down_conversion_enable specifies whether
- down-conversion of message formats is enabled to satisfy consumer
- requests. When 'false', the broker will not perform conversion for
- consumers expecting older message formats. The broker will respond with
- an ``UNSUPPORTED_VERSION`` error for consume requests from these older
- clients.
- "message_format_version": "3.0-IV1", # Optional. Default
- value is "3.0-IV1". The message_format_version specifies the message
- format version used by the broker to append messages to the logs. The
- value of this setting is assumed to be 3.0-IV1 if the broker protocol
- version is 3.0 or higher. By setting a particular message format
- version, all existing messages on disk must be smaller or equal to the
- specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0",
- "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2",
- "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0",
- "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2",
- "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1",
- "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0",
- "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0",
- "3.3-IV1", "3.3-IV2", and "3.3-IV3".
- "message_timestamp_type": "create_time", # Optional. Default
- value is "create_time". The message_timestamp_type specifies whether to
- use the message create time or log append time as the timestamp on a
- message. Known values are: "create_time" and "log_append_time".
- "min_cleanable_dirty_ratio": 0.5, # Optional. Default value
- is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log
- compaction (if enabled) in relation to duplicates present in the logs.
- For example, at 0.5, at most 50% of the log could be duplicates before
- compaction would begin.
- "min_compaction_lag_ms": 0, # Optional. Default value is 0.
- The min_compaction_lag_ms specifies the minimum time (in ms) that a
- message will remain uncompacted in the log. Only relevant if log
- compaction is enabled.
- "min_insync_replicas": 1, # Optional. Default value is 1.
- The min_insync_replicas specifies the number of replicas that must ACK a
- write for the write to be considered successful.
- "preallocate": False, # Optional. Default value is False.
- The preallocate specifies whether a file should be preallocated on disk
- when creating a new log segment.
- "retention_bytes": -1, # Optional. Default value is -1. The
- retention_bytes specifies the maximum size of the log (in bytes) before
- deleting messages. -1 indicates that there is no limit.
- "retention_ms": 604800000, # Optional. Default value is
- 604800000. The retention_ms specifies the maximum amount of time (in ms)
- to keep a message before deleting it.
- "segment_bytes": 209715200, # Optional. Default value is
- 209715200. The segment_bytes specifies the maximum size of a single log
- file (in bytes).
- "segment_jitter_ms": 0, # Optional. Default value is 0. The
- segment_jitter_ms specifies the maximum random jitter subtracted from the
- scheduled segment roll time to avoid thundering herds of segment rolling.
- "segment_ms": 604800000 # Optional. Default value is
- 604800000. The segment_ms specifies the period of time after which the
- log will be forced to roll if the segment file isn't full. This ensures
- that retention can delete or compact old data.
- },
- "name": "str", # Optional. The name of the Kafka topic.
- "partitions": [
- {
- "consumer_groups": [
- {
- "group_name": "str", # Optional.
- Name of the consumer group.
- "offset": 0 # Optional. The current
- offset of the consumer group.
- }
- ],
- "earliest_offset": 0, # Optional. The earliest
- consumer offset amongst consumer groups.
- "id": 0, # Optional. An identifier for the
- partition.
- "in_sync_replicas": 0, # Optional. The number of
- nodes that are in-sync (have the latest data) for the given
- partition.
- "size": 0 # Optional. Size of the topic partition in
- bytes.
- }
- ],
- "replication_factor": 0, # Optional. The number of nodes to
- replicate data across the cluster.
- "state": "str" # Optional. The state of the Kafka topic. Known
- values are: "active", "configuring", "deleting", and "unknown".
- }
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # JSON input template you can fill out and use as your body input.
+ body = {
+ "schema": "str", # Optional. The schema definition in the specified format.
+ "schema_type": "str", # Optional. The type of the schema. Known values are:
+ "AVRO", "JSON", and "PROTOBUF".
+ "subject_name": "str" # Optional. The name of the schema subject.
+ }
+
+ # response body for status code(s): 201
+ response == {
+ "schema": "str", # Optional. The schema definition in the specified format.
+            "schema_id": 0, # Optional. The id for the schema.
+ "schema_type": "str", # Optional. The type of the schema. Known values are:
+ "AVRO", "JSON", and "PROTOBUF".
+ "subject_name": "str" # Optional. The name of the schema subject.
+ }
+ # response body for status code(s): 404
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
+ """
+
+ @overload
+ async def create_kafka_schema(
+ self,
+ database_cluster_uuid: str,
+ body: IO[bytes],
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> JSON:
+ # pylint: disable=line-too-long
+ """Create Schema Registry for Kafka Cluster.
+
+ To create a Kafka schema for a database cluster, send a POST request to
+ ``/v2/databases/$DATABASE_ID/schema-registry``.
+
+ :param database_cluster_uuid: A unique identifier for a database cluster. Required.
+ :type database_cluster_uuid: str
+ :param body: Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: JSON object
+ :rtype: JSON
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # response body for status code(s): 201
+ response == {
+ "schema": "str", # Optional. The schema definition in the specified format.
+            "schema_id": 0, # Optional. The id for the schema.
+ "schema_type": "str", # Optional. The type of the schema. Known values are:
+ "AVRO", "JSON", and "PROTOBUF".
+ "subject_name": "str" # Optional. The name of the schema subject.
+ }
+ # response body for status code(s): 404
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
+ """
+
+ @distributed_trace_async
+ async def create_kafka_schema(
+ self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any
+ ) -> JSON:
+ # pylint: disable=line-too-long
+ """Create Schema Registry for Kafka Cluster.
+
+ To create a Kafka schema for a database cluster, send a POST request to
+ ``/v2/databases/$DATABASE_ID/schema-registry``.
+
+ :param database_cluster_uuid: A unique identifier for a database cluster. Required.
+ :type database_cluster_uuid: str
+ :param body: Is either a JSON type or a IO[bytes] type. Required.
+ :type body: JSON or IO[bytes]
+ :return: JSON object
+ :rtype: JSON
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # JSON input template you can fill out and use as your body input.
+ body = {
+ "schema": "str", # Optional. The schema definition in the specified format.
+ "schema_type": "str", # Optional. The type of the schema. Known values are:
+ "AVRO", "JSON", and "PROTOBUF".
+ "subject_name": "str" # Optional. The name of the schema subject.
+ }
+
+ # response body for status code(s): 201
+ response == {
+ "schema": "str", # Optional. The schema definition in the specified format.
+            "schema_id": 0, # Optional. The id for the schema.
+ "schema_type": "str", # Optional. The type of the schema. Known values are:
+ "AVRO", "JSON", and "PROTOBUF".
+ "subject_name": "str" # Optional. The name of the schema subject.
}
# response body for status code(s): 404
response == {
@@ -117767,12 +125455,9 @@ async def create_kafka_topic(
if isinstance(body, (IOBase, bytes)):
_content = body
else:
- if body is not None:
- _json = body
- else:
- _json = None
+ _json = body
- _request = build_databases_create_kafka_topic_request(
+ _request = build_databases_create_kafka_schema_request(
database_cluster_uuid=database_cluster_uuid,
content_type=content_type,
json=_json,
@@ -117836,21 +125521,19 @@ async def create_kafka_topic(
return cast(JSON, deserialized) # type: ignore
@distributed_trace_async
- async def get_kafka_topic(
- self, database_cluster_uuid: str, topic_name: str, **kwargs: Any
+ async def get_kafka_schema(
+ self, database_cluster_uuid: str, subject_name: str, **kwargs: Any
) -> JSON:
# pylint: disable=line-too-long
- """Get Topic for a Kafka Cluster.
-
- To retrieve a given topic by name from the set of a Kafka cluster's topics,
- send a GET request to ``/v2/databases/$DATABASE_ID/topics/$TOPIC_NAME``.
+ """Get a Kafka Schema by Subject Name.
- The result will be a JSON object with a ``topic`` key.
+ To get a specific schema by subject name for a Kafka cluster, send a GET request to
+ ``/v2/databases/$DATABASE_ID/schema-registry/$SUBJECT_NAME``.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
- :param topic_name: The name used to identify the Kafka topic. Required.
- :type topic_name: str
+ :param subject_name: The name of the Kafka schema subject. Required.
+ :type subject_name: str
:return: JSON object
:rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
@@ -117860,127 +125543,12 @@ async def get_kafka_topic(
# response body for status code(s): 200
response == {
- "topic": {
- "config": {
- "cleanup_policy": "delete", # Optional. Default value is
- "delete". The cleanup_policy sets the retention policy to use on log
- segments. 'delete' will discard old segments when retention time/size
- limits are reached. 'compact' will enable log compaction, resulting in
- retention of the latest value for each key. Known values are: "delete",
- "compact", and "compact_delete".
- "compression_type": "producer", # Optional. Default value is
- "producer". The compression_type specifies the compression type of the
- topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and
- "uncompressed".
- "delete_retention_ms": 86400000, # Optional. Default value
- is 86400000. The delete_retention_ms specifies how long (in ms) to retain
- delete tombstone markers for topics.
- "file_delete_delay_ms": 60000, # Optional. Default value is
- 60000. The file_delete_delay_ms specifies the time (in ms) to wait before
- deleting a file from the filesystem.
- "flush_messages": 9223372036854776000, # Optional. Default
- value is 9223372036854776000. The flush_messages specifies the number of
- messages to accumulate on a log partition before messages are flushed to
- disk.
- "flush_ms": 9223372036854776000, # Optional. Default value
- is 9223372036854776000. The flush_ms specifies the maximum time (in ms)
- that a message is kept in memory before being flushed to disk.
- "index_interval_bytes": 4096, # Optional. Default value is
- 4096. The index_interval_bytes specifies the number of bytes between
- entries being added into te offset index.
- "max_compaction_lag_ms": 9223372036854776000, # Optional.
- Default value is 9223372036854776000. The max_compaction_lag_ms specifies
- the maximum amount of time (in ms) that a message will remain
- uncompacted. This is only applicable if the logs are have compaction
- enabled.
- "max_message_bytes": 1048588, # Optional. Default value is
- 1048588. The max_messages_bytes specifies the largest record batch size
- (in bytes) that can be sent to the server. This is calculated after
- compression if compression is enabled.
- "message_down_conversion_enable": True, # Optional. Default
- value is True. The message_down_conversion_enable specifies whether
- down-conversion of message formats is enabled to satisfy consumer
- requests. When 'false', the broker will not perform conversion for
- consumers expecting older message formats. The broker will respond with
- an ``UNSUPPORTED_VERSION`` error for consume requests from these older
- clients.
- "message_format_version": "3.0-IV1", # Optional. Default
- value is "3.0-IV1". The message_format_version specifies the message
- format version used by the broker to append messages to the logs. The
- value of this setting is assumed to be 3.0-IV1 if the broker protocol
- version is 3.0 or higher. By setting a particular message format
- version, all existing messages on disk must be smaller or equal to the
- specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0",
- "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2",
- "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0",
- "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2",
- "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1",
- "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0",
- "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0",
- "3.3-IV1", "3.3-IV2", and "3.3-IV3".
- "message_timestamp_type": "create_time", # Optional. Default
- value is "create_time". The message_timestamp_type specifies whether to
- use the message create time or log append time as the timestamp on a
- message. Known values are: "create_time" and "log_append_time".
- "min_cleanable_dirty_ratio": 0.5, # Optional. Default value
- is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log
- compaction (if enabled) in relation to duplicates present in the logs.
- For example, at 0.5, at most 50% of the log could be duplicates before
- compaction would begin.
- "min_compaction_lag_ms": 0, # Optional. Default value is 0.
- The min_compaction_lag_ms specifies the minimum time (in ms) that a
- message will remain uncompacted in the log. Only relevant if log
- compaction is enabled.
- "min_insync_replicas": 1, # Optional. Default value is 1.
- The min_insync_replicas specifies the number of replicas that must ACK a
- write for the write to be considered successful.
- "preallocate": False, # Optional. Default value is False.
- The preallocate specifies whether a file should be preallocated on disk
- when creating a new log segment.
- "retention_bytes": -1, # Optional. Default value is -1. The
- retention_bytes specifies the maximum size of the log (in bytes) before
- deleting messages. -1 indicates that there is no limit.
- "retention_ms": 604800000, # Optional. Default value is
- 604800000. The retention_ms specifies the maximum amount of time (in ms)
- to keep a message before deleting it.
- "segment_bytes": 209715200, # Optional. Default value is
- 209715200. The segment_bytes specifies the maximum size of a single log
- file (in bytes).
- "segment_jitter_ms": 0, # Optional. Default value is 0. The
- segment_jitter_ms specifies the maximum random jitter subtracted from the
- scheduled segment roll time to avoid thundering herds of segment rolling.
- "segment_ms": 604800000 # Optional. Default value is
- 604800000. The segment_ms specifies the period of time after which the
- log will be forced to roll if the segment file isn't full. This ensures
- that retention can delete or compact old data.
- },
- "name": "str", # Optional. The name of the Kafka topic.
- "partitions": [
- {
- "consumer_groups": [
- {
- "group_name": "str", # Optional.
- Name of the consumer group.
- "offset": 0 # Optional. The current
- offset of the consumer group.
- }
- ],
- "earliest_offset": 0, # Optional. The earliest
- consumer offset amongst consumer groups.
- "id": 0, # Optional. An identifier for the
- partition.
- "in_sync_replicas": 0, # Optional. The number of
- nodes that are in-sync (have the latest data) for the given
- partition.
- "size": 0 # Optional. Size of the topic partition in
- bytes.
- }
- ],
- "replication_factor": 0, # Optional. The number of nodes to
- replicate data across the cluster.
- "state": "str" # Optional. The state of the Kafka topic. Known
- values are: "active", "configuring", "deleting", and "unknown".
- }
+ "schema": "str", # Optional. The schema definition in the specified format.
+            "schema_id": 0, # Optional. The id for the schema.
+ "schema_type": "str", # Optional. The type of the schema. Known values are:
+ "AVRO", "JSON", and "PROTOBUF".
+ "subject_name": "str", # Optional. The name of the schema subject.
+ "version": "str" # Optional. The version of the schema.
}
# response body for status code(s): 404
response == {
@@ -118012,9 +125580,9 @@ async def get_kafka_topic(
cls: ClsType[JSON] = kwargs.pop("cls", None)
- _request = build_databases_get_kafka_topic_request(
+ _request = build_databases_get_kafka_schema_request(
database_cluster_uuid=database_cluster_uuid,
- topic_name=topic_name,
+ subject_name=subject_name,
headers=_headers,
params=_params,
)
@@ -118073,258 +125641,27 @@ async def get_kafka_topic(
return cast(JSON, deserialized) # type: ignore
- @overload
- async def update_kafka_topic(
- self,
- database_cluster_uuid: str,
- topic_name: str,
- body: Optional[JSON] = None,
- *,
- content_type: str = "application/json",
- **kwargs: Any
- ) -> JSON:
+ @distributed_trace_async
+ async def delete_kafka_schema(
+ self, database_cluster_uuid: str, subject_name: str, **kwargs: Any
+ ) -> Optional[JSON]:
# pylint: disable=line-too-long
- """Update Topic for a Kafka Cluster.
-
- To update a topic attached to a Kafka cluster, send a PUT request to
- ``/v2/databases/$DATABASE_ID/topics/$TOPIC_NAME``.
+ """Delete a Kafka Schema by Subject Name.
- The result will be a JSON object with a ``topic`` key.
+ To delete a specific schema by subject name for a Kafka cluster, send a DELETE request to
+ ``/v2/databases/$DATABASE_ID/schema-registry/$SUBJECT_NAME``.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
- :param topic_name: The name used to identify the Kafka topic. Required.
- :type topic_name: str
- :param body: Default value is None.
- :type body: JSON
- :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
- Default value is "application/json".
- :paramtype content_type: str
- :return: JSON object
- :rtype: JSON
+ :param subject_name: The name of the Kafka schema subject. Required.
+ :type subject_name: str
+ :return: JSON object or None
+ :rtype: JSON or None
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
- # JSON input template you can fill out and use as your body input.
- body = {
- "config": {
- "cleanup_policy": "delete", # Optional. Default value is "delete".
- The cleanup_policy sets the retention policy to use on log segments. 'delete'
- will discard old segments when retention time/size limits are reached.
- 'compact' will enable log compaction, resulting in retention of the latest
- value for each key. Known values are: "delete", "compact", and
- "compact_delete".
- "compression_type": "producer", # Optional. Default value is
- "producer". The compression_type specifies the compression type of the topic.
- Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and
- "uncompressed".
- "delete_retention_ms": 86400000, # Optional. Default value is
- 86400000. The delete_retention_ms specifies how long (in ms) to retain delete
- tombstone markers for topics.
- "file_delete_delay_ms": 60000, # Optional. Default value is 60000.
- The file_delete_delay_ms specifies the time (in ms) to wait before deleting a
- file from the filesystem.
- "flush_messages": 9223372036854776000, # Optional. Default value is
- 9223372036854776000. The flush_messages specifies the number of messages to
- accumulate on a log partition before messages are flushed to disk.
- "flush_ms": 9223372036854776000, # Optional. Default value is
- 9223372036854776000. The flush_ms specifies the maximum time (in ms) that a
- message is kept in memory before being flushed to disk.
- "index_interval_bytes": 4096, # Optional. Default value is 4096. The
- index_interval_bytes specifies the number of bytes between entries being
- added into te offset index.
- "max_compaction_lag_ms": 9223372036854776000, # Optional. Default
- value is 9223372036854776000. The max_compaction_lag_ms specifies the maximum
- amount of time (in ms) that a message will remain uncompacted. This is only
- applicable if the logs are have compaction enabled.
- "max_message_bytes": 1048588, # Optional. Default value is 1048588.
- The max_messages_bytes specifies the largest record batch size (in bytes)
- that can be sent to the server. This is calculated after compression if
- compression is enabled.
- "message_down_conversion_enable": True, # Optional. Default value is
- True. The message_down_conversion_enable specifies whether down-conversion of
- message formats is enabled to satisfy consumer requests. When 'false', the
- broker will not perform conversion for consumers expecting older message
- formats. The broker will respond with an ``UNSUPPORTED_VERSION`` error for
- consume requests from these older clients.
- "message_format_version": "3.0-IV1", # Optional. Default value is
- "3.0-IV1". The message_format_version specifies the message format version
- used by the broker to append messages to the logs. The value of this setting
- is assumed to be 3.0-IV1 if the broker protocol version is 3.0 or higher. By
- setting a particular message format version, all existing messages on disk
- must be smaller or equal to the specified version. Known values are: "0.8.0",
- "0.8.1", "0.8.2", "0.9.0", "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0",
- "0.10.1-IV1", "0.10.1-IV2", "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1",
- "0.11.0-IV2", "1.0-IV0", "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0",
- "2.1-IV1", "2.1-IV2", "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0",
- "2.4-IV1", "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0",
- "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", "3.3-IV1",
- "3.3-IV2", and "3.3-IV3".
- "message_timestamp_type": "create_time", # Optional. Default value
- is "create_time". The message_timestamp_type specifies whether to use the
- message create time or log append time as the timestamp on a message. Known
- values are: "create_time" and "log_append_time".
- "min_cleanable_dirty_ratio": 0.5, # Optional. Default value is 0.5.
- The min_cleanable_dirty_ratio specifies the frequency of log compaction (if
- enabled) in relation to duplicates present in the logs. For example, at 0.5,
- at most 50% of the log could be duplicates before compaction would begin.
- "min_compaction_lag_ms": 0, # Optional. Default value is 0. The
- min_compaction_lag_ms specifies the minimum time (in ms) that a message will
- remain uncompacted in the log. Only relevant if log compaction is enabled.
- "min_insync_replicas": 1, # Optional. Default value is 1. The
- min_insync_replicas specifies the number of replicas that must ACK a write
- for the write to be considered successful.
- "preallocate": False, # Optional. Default value is False. The
- preallocate specifies whether a file should be preallocated on disk when
- creating a new log segment.
- "retention_bytes": -1, # Optional. Default value is -1. The
- retention_bytes specifies the maximum size of the log (in bytes) before
- deleting messages. -1 indicates that there is no limit.
- "retention_ms": 604800000, # Optional. Default value is 604800000.
- The retention_ms specifies the maximum amount of time (in ms) to keep a
- message before deleting it.
- "segment_bytes": 209715200, # Optional. Default value is 209715200.
- The segment_bytes specifies the maximum size of a single log file (in bytes).
- "segment_jitter_ms": 0, # Optional. Default value is 0. The
- segment_jitter_ms specifies the maximum random jitter subtracted from the
- scheduled segment roll time to avoid thundering herds of segment rolling.
- "segment_ms": 604800000 # Optional. Default value is 604800000. The
- segment_ms specifies the period of time after which the log will be forced to
- roll if the segment file isn't full. This ensures that retention can delete
- or compact old data.
- },
- "partition_count": 0, # Optional. The number of partitions available for the
- topic. On update, this value can only be increased.
- "replication_factor": 0 # Optional. The number of nodes to replicate data
- across the cluster.
- }
-
- # response body for status code(s): 200
- response == {
- "topic": {
- "config": {
- "cleanup_policy": "delete", # Optional. Default value is
- "delete". The cleanup_policy sets the retention policy to use on log
- segments. 'delete' will discard old segments when retention time/size
- limits are reached. 'compact' will enable log compaction, resulting in
- retention of the latest value for each key. Known values are: "delete",
- "compact", and "compact_delete".
- "compression_type": "producer", # Optional. Default value is
- "producer". The compression_type specifies the compression type of the
- topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and
- "uncompressed".
- "delete_retention_ms": 86400000, # Optional. Default value
- is 86400000. The delete_retention_ms specifies how long (in ms) to retain
- delete tombstone markers for topics.
- "file_delete_delay_ms": 60000, # Optional. Default value is
- 60000. The file_delete_delay_ms specifies the time (in ms) to wait before
- deleting a file from the filesystem.
- "flush_messages": 9223372036854776000, # Optional. Default
- value is 9223372036854776000. The flush_messages specifies the number of
- messages to accumulate on a log partition before messages are flushed to
- disk.
- "flush_ms": 9223372036854776000, # Optional. Default value
- is 9223372036854776000. The flush_ms specifies the maximum time (in ms)
- that a message is kept in memory before being flushed to disk.
- "index_interval_bytes": 4096, # Optional. Default value is
- 4096. The index_interval_bytes specifies the number of bytes between
- entries being added into te offset index.
- "max_compaction_lag_ms": 9223372036854776000, # Optional.
- Default value is 9223372036854776000. The max_compaction_lag_ms specifies
- the maximum amount of time (in ms) that a message will remain
- uncompacted. This is only applicable if the logs are have compaction
- enabled.
- "max_message_bytes": 1048588, # Optional. Default value is
- 1048588. The max_messages_bytes specifies the largest record batch size
- (in bytes) that can be sent to the server. This is calculated after
- compression if compression is enabled.
- "message_down_conversion_enable": True, # Optional. Default
- value is True. The message_down_conversion_enable specifies whether
- down-conversion of message formats is enabled to satisfy consumer
- requests. When 'false', the broker will not perform conversion for
- consumers expecting older message formats. The broker will respond with
- an ``UNSUPPORTED_VERSION`` error for consume requests from these older
- clients.
- "message_format_version": "3.0-IV1", # Optional. Default
- value is "3.0-IV1". The message_format_version specifies the message
- format version used by the broker to append messages to the logs. The
- value of this setting is assumed to be 3.0-IV1 if the broker protocol
- version is 3.0 or higher. By setting a particular message format
- version, all existing messages on disk must be smaller or equal to the
- specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0",
- "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2",
- "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0",
- "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2",
- "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1",
- "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0",
- "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0",
- "3.3-IV1", "3.3-IV2", and "3.3-IV3".
- "message_timestamp_type": "create_time", # Optional. Default
- value is "create_time". The message_timestamp_type specifies whether to
- use the message create time or log append time as the timestamp on a
- message. Known values are: "create_time" and "log_append_time".
- "min_cleanable_dirty_ratio": 0.5, # Optional. Default value
- is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log
- compaction (if enabled) in relation to duplicates present in the logs.
- For example, at 0.5, at most 50% of the log could be duplicates before
- compaction would begin.
- "min_compaction_lag_ms": 0, # Optional. Default value is 0.
- The min_compaction_lag_ms specifies the minimum time (in ms) that a
- message will remain uncompacted in the log. Only relevant if log
- compaction is enabled.
- "min_insync_replicas": 1, # Optional. Default value is 1.
- The min_insync_replicas specifies the number of replicas that must ACK a
- write for the write to be considered successful.
- "preallocate": False, # Optional. Default value is False.
- The preallocate specifies whether a file should be preallocated on disk
- when creating a new log segment.
- "retention_bytes": -1, # Optional. Default value is -1. The
- retention_bytes specifies the maximum size of the log (in bytes) before
- deleting messages. -1 indicates that there is no limit.
- "retention_ms": 604800000, # Optional. Default value is
- 604800000. The retention_ms specifies the maximum amount of time (in ms)
- to keep a message before deleting it.
- "segment_bytes": 209715200, # Optional. Default value is
- 209715200. The segment_bytes specifies the maximum size of a single log
- file (in bytes).
- "segment_jitter_ms": 0, # Optional. Default value is 0. The
- segment_jitter_ms specifies the maximum random jitter subtracted from the
- scheduled segment roll time to avoid thundering herds of segment rolling.
- "segment_ms": 604800000 # Optional. Default value is
- 604800000. The segment_ms specifies the period of time after which the
- log will be forced to roll if the segment file isn't full. This ensures
- that retention can delete or compact old data.
- },
- "name": "str", # Optional. The name of the Kafka topic.
- "partitions": [
- {
- "consumer_groups": [
- {
- "group_name": "str", # Optional.
- Name of the consumer group.
- "offset": 0 # Optional. The current
- offset of the consumer group.
- }
- ],
- "earliest_offset": 0, # Optional. The earliest
- consumer offset amongst consumer groups.
- "id": 0, # Optional. An identifier for the
- partition.
- "in_sync_replicas": 0, # Optional. The number of
- nodes that are in-sync (have the latest data) for the given
- partition.
- "size": 0 # Optional. Size of the topic partition in
- bytes.
- }
- ],
- "replication_factor": 0, # Optional. The number of nodes to
- replicate data across the cluster.
- "state": "str" # Optional. The state of the Kafka topic. Known
- values are: "active", "configuring", "deleting", and "unknown".
- }
- }
# response body for status code(s): 404
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
@@ -118337,34 +125674,97 @@ async def update_kafka_topic(
tickets to help identify the issue.
}
"""
+ error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ 401: cast(
+ Type[HttpResponseError],
+ lambda response: ClientAuthenticationError(response=response),
+ ),
+ 429: HttpResponseError,
+ 500: HttpResponseError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
- @overload
- async def update_kafka_topic(
- self,
- database_cluster_uuid: str,
- topic_name: str,
- body: Optional[IO[bytes]] = None,
- *,
- content_type: str = "application/json",
- **kwargs: Any
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = kwargs.pop("params", {}) or {}
+
+ cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None)
+
+ _request = build_databases_delete_kafka_schema_request(
+ database_cluster_uuid=database_cluster_uuid,
+ subject_name=subject_name,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = (
+ await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [204, 404]:
+ if _stream:
+ await response.read() # Load the body in memory and close the socket
+ map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
+ raise HttpResponseError(response=response)
+
+ deserialized = None
+ response_headers = {}
+ if response.status_code == 204:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.status_code == 404:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @distributed_trace_async
+ async def get_kafka_schema_version(
+ self, database_cluster_uuid: str, subject_name: str, version: str, **kwargs: Any
) -> JSON:
# pylint: disable=line-too-long
- """Update Topic for a Kafka Cluster.
-
- To update a topic attached to a Kafka cluster, send a PUT request to
- ``/v2/databases/$DATABASE_ID/topics/$TOPIC_NAME``.
+ """Get Kafka Schema by Subject Version.
- The result will be a JSON object with a ``topic`` key.
+ To get a specific schema by subject name for a Kafka cluster, send a GET request to
+ ``/v2/databases/$DATABASE_ID/schema-registry/$SUBJECT_NAME/versions/$VERSION``.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
- :param topic_name: The name used to identify the Kafka topic. Required.
- :type topic_name: str
- :param body: Default value is None.
- :type body: IO[bytes]
- :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
- Default value is "application/json".
- :paramtype content_type: str
+ :param subject_name: The name of the Kafka schema subject. Required.
+ :type subject_name: str
+ :param version: The version of the Kafka schema subject. Required.
+ :type version: str
:return: JSON object
:rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
@@ -118374,127 +125774,12 @@ async def update_kafka_topic(
# response body for status code(s): 200
response == {
- "topic": {
- "config": {
- "cleanup_policy": "delete", # Optional. Default value is
- "delete". The cleanup_policy sets the retention policy to use on log
- segments. 'delete' will discard old segments when retention time/size
- limits are reached. 'compact' will enable log compaction, resulting in
- retention of the latest value for each key. Known values are: "delete",
- "compact", and "compact_delete".
- "compression_type": "producer", # Optional. Default value is
- "producer". The compression_type specifies the compression type of the
- topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and
- "uncompressed".
- "delete_retention_ms": 86400000, # Optional. Default value
- is 86400000. The delete_retention_ms specifies how long (in ms) to retain
- delete tombstone markers for topics.
- "file_delete_delay_ms": 60000, # Optional. Default value is
- 60000. The file_delete_delay_ms specifies the time (in ms) to wait before
- deleting a file from the filesystem.
- "flush_messages": 9223372036854776000, # Optional. Default
- value is 9223372036854776000. The flush_messages specifies the number of
- messages to accumulate on a log partition before messages are flushed to
- disk.
- "flush_ms": 9223372036854776000, # Optional. Default value
- is 9223372036854776000. The flush_ms specifies the maximum time (in ms)
- that a message is kept in memory before being flushed to disk.
- "index_interval_bytes": 4096, # Optional. Default value is
- 4096. The index_interval_bytes specifies the number of bytes between
- entries being added into te offset index.
- "max_compaction_lag_ms": 9223372036854776000, # Optional.
- Default value is 9223372036854776000. The max_compaction_lag_ms specifies
- the maximum amount of time (in ms) that a message will remain
- uncompacted. This is only applicable if the logs are have compaction
- enabled.
- "max_message_bytes": 1048588, # Optional. Default value is
- 1048588. The max_messages_bytes specifies the largest record batch size
- (in bytes) that can be sent to the server. This is calculated after
- compression if compression is enabled.
- "message_down_conversion_enable": True, # Optional. Default
- value is True. The message_down_conversion_enable specifies whether
- down-conversion of message formats is enabled to satisfy consumer
- requests. When 'false', the broker will not perform conversion for
- consumers expecting older message formats. The broker will respond with
- an ``UNSUPPORTED_VERSION`` error for consume requests from these older
- clients.
- "message_format_version": "3.0-IV1", # Optional. Default
- value is "3.0-IV1". The message_format_version specifies the message
- format version used by the broker to append messages to the logs. The
- value of this setting is assumed to be 3.0-IV1 if the broker protocol
- version is 3.0 or higher. By setting a particular message format
- version, all existing messages on disk must be smaller or equal to the
- specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0",
- "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2",
- "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0",
- "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2",
- "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1",
- "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0",
- "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0",
- "3.3-IV1", "3.3-IV2", and "3.3-IV3".
- "message_timestamp_type": "create_time", # Optional. Default
- value is "create_time". The message_timestamp_type specifies whether to
- use the message create time or log append time as the timestamp on a
- message. Known values are: "create_time" and "log_append_time".
- "min_cleanable_dirty_ratio": 0.5, # Optional. Default value
- is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log
- compaction (if enabled) in relation to duplicates present in the logs.
- For example, at 0.5, at most 50% of the log could be duplicates before
- compaction would begin.
- "min_compaction_lag_ms": 0, # Optional. Default value is 0.
- The min_compaction_lag_ms specifies the minimum time (in ms) that a
- message will remain uncompacted in the log. Only relevant if log
- compaction is enabled.
- "min_insync_replicas": 1, # Optional. Default value is 1.
- The min_insync_replicas specifies the number of replicas that must ACK a
- write for the write to be considered successful.
- "preallocate": False, # Optional. Default value is False.
- The preallocate specifies whether a file should be preallocated on disk
- when creating a new log segment.
- "retention_bytes": -1, # Optional. Default value is -1. The
- retention_bytes specifies the maximum size of the log (in bytes) before
- deleting messages. -1 indicates that there is no limit.
- "retention_ms": 604800000, # Optional. Default value is
- 604800000. The retention_ms specifies the maximum amount of time (in ms)
- to keep a message before deleting it.
- "segment_bytes": 209715200, # Optional. Default value is
- 209715200. The segment_bytes specifies the maximum size of a single log
- file (in bytes).
- "segment_jitter_ms": 0, # Optional. Default value is 0. The
- segment_jitter_ms specifies the maximum random jitter subtracted from the
- scheduled segment roll time to avoid thundering herds of segment rolling.
- "segment_ms": 604800000 # Optional. Default value is
- 604800000. The segment_ms specifies the period of time after which the
- log will be forced to roll if the segment file isn't full. This ensures
- that retention can delete or compact old data.
- },
- "name": "str", # Optional. The name of the Kafka topic.
- "partitions": [
- {
- "consumer_groups": [
- {
- "group_name": "str", # Optional.
- Name of the consumer group.
- "offset": 0 # Optional. The current
- offset of the consumer group.
- }
- ],
- "earliest_offset": 0, # Optional. The earliest
- consumer offset amongst consumer groups.
- "id": 0, # Optional. An identifier for the
- partition.
- "in_sync_replicas": 0, # Optional. The number of
- nodes that are in-sync (have the latest data) for the given
- partition.
- "size": 0 # Optional. Size of the topic partition in
- bytes.
- }
- ],
- "replication_factor": 0, # Optional. The number of nodes to
- replicate data across the cluster.
- "state": "str" # Optional. The state of the Kafka topic. Known
- values are: "active", "configuring", "deleting", and "unknown".
- }
+ "schema": "str", # Optional. The schema definition in the specified format.
+ "schema_id": 0, # Optional. The id for schema.
+ "schema_type": "str", # Optional. The type of the schema. Known values are:
+ "AVRO", "JSON", and "PROTOBUF".
+ "subject_name": "str", # Optional. The name of the schema subject.
+ "version": "str" # Optional. The version of the schema.
}
# response body for status code(s): 404
response == {
@@ -118508,253 +125793,112 @@ async def update_kafka_topic(
tickets to help identify the issue.
}
"""
+ error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ 401: cast(
+ Type[HttpResponseError],
+ lambda response: ClientAuthenticationError(response=response),
+ ),
+ 429: HttpResponseError,
+ 500: HttpResponseError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
- @distributed_trace_async
- async def update_kafka_topic(
- self,
- database_cluster_uuid: str,
- topic_name: str,
- body: Optional[Union[JSON, IO[bytes]]] = None,
- **kwargs: Any
- ) -> JSON:
- # pylint: disable=line-too-long
- """Update Topic for a Kafka Cluster.
-
- To update a topic attached to a Kafka cluster, send a PUT request to
- ``/v2/databases/$DATABASE_ID/topics/$TOPIC_NAME``.
-
- The result will be a JSON object with a ``topic`` key.
-
- :param database_cluster_uuid: A unique identifier for a database cluster. Required.
- :type database_cluster_uuid: str
- :param topic_name: The name used to identify the Kafka topic. Required.
- :type topic_name: str
- :param body: Is either a JSON type or a IO[bytes] type. Default value is None.
- :type body: JSON or IO[bytes]
- :return: JSON object
- :rtype: JSON
- :raises ~azure.core.exceptions.HttpResponseError:
-
- Example:
- .. code-block:: python
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = kwargs.pop("params", {}) or {}
- # JSON input template you can fill out and use as your body input.
- body = {
- "config": {
- "cleanup_policy": "delete", # Optional. Default value is "delete".
- The cleanup_policy sets the retention policy to use on log segments. 'delete'
- will discard old segments when retention time/size limits are reached.
- 'compact' will enable log compaction, resulting in retention of the latest
- value for each key. Known values are: "delete", "compact", and
- "compact_delete".
- "compression_type": "producer", # Optional. Default value is
- "producer". The compression_type specifies the compression type of the topic.
- Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and
- "uncompressed".
- "delete_retention_ms": 86400000, # Optional. Default value is
- 86400000. The delete_retention_ms specifies how long (in ms) to retain delete
- tombstone markers for topics.
- "file_delete_delay_ms": 60000, # Optional. Default value is 60000.
- The file_delete_delay_ms specifies the time (in ms) to wait before deleting a
- file from the filesystem.
- "flush_messages": 9223372036854776000, # Optional. Default value is
- 9223372036854776000. The flush_messages specifies the number of messages to
- accumulate on a log partition before messages are flushed to disk.
- "flush_ms": 9223372036854776000, # Optional. Default value is
- 9223372036854776000. The flush_ms specifies the maximum time (in ms) that a
- message is kept in memory before being flushed to disk.
- "index_interval_bytes": 4096, # Optional. Default value is 4096. The
- index_interval_bytes specifies the number of bytes between entries being
- added into te offset index.
- "max_compaction_lag_ms": 9223372036854776000, # Optional. Default
- value is 9223372036854776000. The max_compaction_lag_ms specifies the maximum
- amount of time (in ms) that a message will remain uncompacted. This is only
- applicable if the logs are have compaction enabled.
- "max_message_bytes": 1048588, # Optional. Default value is 1048588.
- The max_messages_bytes specifies the largest record batch size (in bytes)
- that can be sent to the server. This is calculated after compression if
- compression is enabled.
- "message_down_conversion_enable": True, # Optional. Default value is
- True. The message_down_conversion_enable specifies whether down-conversion of
- message formats is enabled to satisfy consumer requests. When 'false', the
- broker will not perform conversion for consumers expecting older message
- formats. The broker will respond with an ``UNSUPPORTED_VERSION`` error for
- consume requests from these older clients.
- "message_format_version": "3.0-IV1", # Optional. Default value is
- "3.0-IV1". The message_format_version specifies the message format version
- used by the broker to append messages to the logs. The value of this setting
- is assumed to be 3.0-IV1 if the broker protocol version is 3.0 or higher. By
- setting a particular message format version, all existing messages on disk
- must be smaller or equal to the specified version. Known values are: "0.8.0",
- "0.8.1", "0.8.2", "0.9.0", "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0",
- "0.10.1-IV1", "0.10.1-IV2", "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1",
- "0.11.0-IV2", "1.0-IV0", "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0",
- "2.1-IV1", "2.1-IV2", "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0",
- "2.4-IV1", "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0",
- "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", "3.3-IV1",
- "3.3-IV2", and "3.3-IV3".
- "message_timestamp_type": "create_time", # Optional. Default value
- is "create_time". The message_timestamp_type specifies whether to use the
- message create time or log append time as the timestamp on a message. Known
- values are: "create_time" and "log_append_time".
- "min_cleanable_dirty_ratio": 0.5, # Optional. Default value is 0.5.
- The min_cleanable_dirty_ratio specifies the frequency of log compaction (if
- enabled) in relation to duplicates present in the logs. For example, at 0.5,
- at most 50% of the log could be duplicates before compaction would begin.
- "min_compaction_lag_ms": 0, # Optional. Default value is 0. The
- min_compaction_lag_ms specifies the minimum time (in ms) that a message will
- remain uncompacted in the log. Only relevant if log compaction is enabled.
- "min_insync_replicas": 1, # Optional. Default value is 1. The
- min_insync_replicas specifies the number of replicas that must ACK a write
- for the write to be considered successful.
- "preallocate": False, # Optional. Default value is False. The
- preallocate specifies whether a file should be preallocated on disk when
- creating a new log segment.
- "retention_bytes": -1, # Optional. Default value is -1. The
- retention_bytes specifies the maximum size of the log (in bytes) before
- deleting messages. -1 indicates that there is no limit.
- "retention_ms": 604800000, # Optional. Default value is 604800000.
- The retention_ms specifies the maximum amount of time (in ms) to keep a
- message before deleting it.
- "segment_bytes": 209715200, # Optional. Default value is 209715200.
- The segment_bytes specifies the maximum size of a single log file (in bytes).
- "segment_jitter_ms": 0, # Optional. Default value is 0. The
- segment_jitter_ms specifies the maximum random jitter subtracted from the
- scheduled segment roll time to avoid thundering herds of segment rolling.
- "segment_ms": 604800000 # Optional. Default value is 604800000. The
- segment_ms specifies the period of time after which the log will be forced to
- roll if the segment file isn't full. This ensures that retention can delete
- or compact old data.
- },
- "partition_count": 0, # Optional. The number of partitions available for the
- topic. On update, this value can only be increased.
- "replication_factor": 0 # Optional. The number of nodes to replicate data
- across the cluster.
- }
+ cls: ClsType[JSON] = kwargs.pop("cls", None)
- # response body for status code(s): 200
- response == {
- "topic": {
- "config": {
- "cleanup_policy": "delete", # Optional. Default value is
- "delete". The cleanup_policy sets the retention policy to use on log
- segments. 'delete' will discard old segments when retention time/size
- limits are reached. 'compact' will enable log compaction, resulting in
- retention of the latest value for each key. Known values are: "delete",
- "compact", and "compact_delete".
- "compression_type": "producer", # Optional. Default value is
- "producer". The compression_type specifies the compression type of the
- topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and
- "uncompressed".
- "delete_retention_ms": 86400000, # Optional. Default value
- is 86400000. The delete_retention_ms specifies how long (in ms) to retain
- delete tombstone markers for topics.
- "file_delete_delay_ms": 60000, # Optional. Default value is
- 60000. The file_delete_delay_ms specifies the time (in ms) to wait before
- deleting a file from the filesystem.
- "flush_messages": 9223372036854776000, # Optional. Default
- value is 9223372036854776000. The flush_messages specifies the number of
- messages to accumulate on a log partition before messages are flushed to
- disk.
- "flush_ms": 9223372036854776000, # Optional. Default value
- is 9223372036854776000. The flush_ms specifies the maximum time (in ms)
- that a message is kept in memory before being flushed to disk.
- "index_interval_bytes": 4096, # Optional. Default value is
- 4096. The index_interval_bytes specifies the number of bytes between
- entries being added into te offset index.
- "max_compaction_lag_ms": 9223372036854776000, # Optional.
- Default value is 9223372036854776000. The max_compaction_lag_ms specifies
- the maximum amount of time (in ms) that a message will remain
- uncompacted. This is only applicable if the logs are have compaction
- enabled.
- "max_message_bytes": 1048588, # Optional. Default value is
- 1048588. The max_messages_bytes specifies the largest record batch size
- (in bytes) that can be sent to the server. This is calculated after
- compression if compression is enabled.
- "message_down_conversion_enable": True, # Optional. Default
- value is True. The message_down_conversion_enable specifies whether
- down-conversion of message formats is enabled to satisfy consumer
- requests. When 'false', the broker will not perform conversion for
- consumers expecting older message formats. The broker will respond with
- an ``UNSUPPORTED_VERSION`` error for consume requests from these older
- clients.
- "message_format_version": "3.0-IV1", # Optional. Default
- value is "3.0-IV1". The message_format_version specifies the message
- format version used by the broker to append messages to the logs. The
- value of this setting is assumed to be 3.0-IV1 if the broker protocol
- version is 3.0 or higher. By setting a particular message format
- version, all existing messages on disk must be smaller or equal to the
- specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0",
- "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2",
- "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0",
- "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2",
- "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1",
- "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0",
- "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0",
- "3.3-IV1", "3.3-IV2", and "3.3-IV3".
- "message_timestamp_type": "create_time", # Optional. Default
- value is "create_time". The message_timestamp_type specifies whether to
- use the message create time or log append time as the timestamp on a
- message. Known values are: "create_time" and "log_append_time".
- "min_cleanable_dirty_ratio": 0.5, # Optional. Default value
- is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log
- compaction (if enabled) in relation to duplicates present in the logs.
- For example, at 0.5, at most 50% of the log could be duplicates before
- compaction would begin.
- "min_compaction_lag_ms": 0, # Optional. Default value is 0.
- The min_compaction_lag_ms specifies the minimum time (in ms) that a
- message will remain uncompacted in the log. Only relevant if log
- compaction is enabled.
- "min_insync_replicas": 1, # Optional. Default value is 1.
- The min_insync_replicas specifies the number of replicas that must ACK a
- write for the write to be considered successful.
- "preallocate": False, # Optional. Default value is False.
- The preallocate specifies whether a file should be preallocated on disk
- when creating a new log segment.
- "retention_bytes": -1, # Optional. Default value is -1. The
- retention_bytes specifies the maximum size of the log (in bytes) before
- deleting messages. -1 indicates that there is no limit.
- "retention_ms": 604800000, # Optional. Default value is
- 604800000. The retention_ms specifies the maximum amount of time (in ms)
- to keep a message before deleting it.
- "segment_bytes": 209715200, # Optional. Default value is
- 209715200. The segment_bytes specifies the maximum size of a single log
- file (in bytes).
- "segment_jitter_ms": 0, # Optional. Default value is 0. The
- segment_jitter_ms specifies the maximum random jitter subtracted from the
- scheduled segment roll time to avoid thundering herds of segment rolling.
- "segment_ms": 604800000 # Optional. Default value is
- 604800000. The segment_ms specifies the period of time after which the
- log will be forced to roll if the segment file isn't full. This ensures
- that retention can delete or compact old data.
- },
- "name": "str", # Optional. The name of the Kafka topic.
- "partitions": [
- {
- "consumer_groups": [
- {
- "group_name": "str", # Optional.
- Name of the consumer group.
- "offset": 0 # Optional. The current
- offset of the consumer group.
- }
- ],
- "earliest_offset": 0, # Optional. The earliest
- consumer offset amongst consumer groups.
- "id": 0, # Optional. An identifier for the
- partition.
- "in_sync_replicas": 0, # Optional. The number of
- nodes that are in-sync (have the latest data) for the given
- partition.
- "size": 0 # Optional. Size of the topic partition in
- bytes.
- }
- ],
- "replication_factor": 0, # Optional. The number of nodes to
- replicate data across the cluster.
- "state": "str" # Optional. The state of the Kafka topic. Known
- values are: "active", "configuring", "deleting", and "unknown".
- }
+ _request = build_databases_get_kafka_schema_version_request(
+ database_cluster_uuid=database_cluster_uuid,
+ subject_name=subject_name,
+ version=version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = (
+ await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 404]:
+ if _stream:
+ await response.read() # Load the body in memory and close the socket
+ map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
+ raise HttpResponseError(response=response)
+
+ response_headers = {}
+ if response.status_code == 200:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if response.status_code == 404:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if cls:
+ return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
+
+ return cast(JSON, deserialized) # type: ignore
+
+ @distributed_trace_async
+ async def get_kafka_schema_config(
+ self, database_cluster_uuid: str, **kwargs: Any
+ ) -> JSON:
+ # pylint: disable=line-too-long
+ """Retrieve Schema Registry Configuration for a kafka Cluster.
+
+ To retrieve the Schema Registry configuration for a Kafka cluster, send a GET request to
+ ``/v2/databases/$DATABASE_ID/schema-registry/config``.
+ The response is a JSON object with a ``compatibility_level`` key, which is set to an object
+ containing any database configuration parameters.
+
+ :param database_cluster_uuid: A unique identifier for a database cluster. Required.
+ :type database_cluster_uuid: str
+ :return: JSON object
+ :rtype: JSON
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # response body for status code(s): 200
+ response == {
+ "compatibility_level": "str" # The compatibility level of the schema
+ registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE",
+ "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE".
}
# response body for status code(s): 404
response == {
@@ -118781,31 +125925,13 @@ async def update_kafka_topic(
}
error_map.update(kwargs.pop("error_map", {}) or {})
- _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _headers = kwargs.pop("headers", {}) or {}
_params = kwargs.pop("params", {}) or {}
- content_type: Optional[str] = kwargs.pop(
- "content_type", _headers.pop("Content-Type", None)
- )
cls: ClsType[JSON] = kwargs.pop("cls", None)
- content_type = content_type or "application/json"
- _json = None
- _content = None
- if isinstance(body, (IOBase, bytes)):
- _content = body
- else:
- if body is not None:
- _json = body
- else:
- _json = None
-
- _request = build_databases_update_kafka_topic_request(
+ _request = build_databases_get_kafka_schema_config_request(
database_cluster_uuid=database_cluster_uuid,
- topic_name=topic_name,
- content_type=content_type,
- json=_json,
- content=_content,
headers=_headers,
params=_params,
)
@@ -118864,30 +125990,152 @@ async def update_kafka_topic(
return cast(JSON, deserialized) # type: ignore
- @distributed_trace_async
- async def delete_kafka_topic(
- self, database_cluster_uuid: str, topic_name: str, **kwargs: Any
- ) -> Optional[JSON]:
+ @overload
+ async def update_kafka_schema_config(
+ self,
+ database_cluster_uuid: str,
+ body: Optional[JSON] = None,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> JSON:
# pylint: disable=line-too-long
- """Delete Topic for a Kafka Cluster.
+ """Update Schema Registry Configuration for a kafka Cluster.
- To delete a single topic within a Kafka cluster, send a DELETE request
- to ``/v2/databases/$DATABASE_ID/topics/$TOPIC_NAME``.
+ To update the Schema Registry configuration for a Kafka cluster, send a PUT request to
+ ``/v2/databases/$DATABASE_ID/schema-registry/config``.
+ The response is a JSON object with a ``compatibility_level`` key, which is set to an object
+ containing any database configuration parameters.
- A status of 204 will be given. This indicates that the request was
- processed successfully, but that no response body is needed.
+ :param database_cluster_uuid: A unique identifier for a database cluster. Required.
+ :type database_cluster_uuid: str
+ :param body: Default value is None.
+ :type body: JSON
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: JSON object
+ :rtype: JSON
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # JSON input template you can fill out and use as your body input.
+ body = {
+ "compatibility_level": "str" # The compatibility level of the schema
+ registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE",
+ "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE".
+ }
+
+ # response body for status code(s): 200
+ response == {
+ "compatibility_level": "str" # The compatibility level of the schema
+ registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE",
+ "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE".
+ }
+ # response body for status code(s): 404
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
+ """
+
+ @overload
+ async def update_kafka_schema_config(
+ self,
+ database_cluster_uuid: str,
+ body: Optional[IO[bytes]] = None,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> JSON:
+ # pylint: disable=line-too-long
+ """Update Schema Registry Configuration for a kafka Cluster.
+
+ To update the Schema Registry configuration for a Kafka cluster, send a PUT request to
+ ``/v2/databases/$DATABASE_ID/schema-registry/config``.
+ The response is a JSON object with a ``compatibility_level`` key, which is set to an object
+ containing any database configuration parameters.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
- :param topic_name: The name used to identify the Kafka topic. Required.
- :type topic_name: str
- :return: JSON object or None
- :rtype: JSON or None
+ :param body: Default value is None.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: JSON object
+ :rtype: JSON
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # response body for status code(s): 200
+ response == {
+ "compatibility_level": "str" # The compatibility level of the schema
+ registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE",
+ "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE".
+ }
+ # response body for status code(s): 404
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
+ """
+
+ @distributed_trace_async
+ async def update_kafka_schema_config(
+ self,
+ database_cluster_uuid: str,
+ body: Optional[Union[JSON, IO[bytes]]] = None,
+ **kwargs: Any
+ ) -> JSON:
+ # pylint: disable=line-too-long
+ """Update Schema Registry Configuration for a kafka Cluster.
+
+ To update the Schema Registry configuration for a Kafka cluster, send a PUT request to
+ ``/v2/databases/$DATABASE_ID/schema-registry/config``.
+ The response is a JSON object with a ``compatibility_level`` key, which is set to an object
+ containing any database configuration parameters.
+
+ :param database_cluster_uuid: A unique identifier for a database cluster. Required.
+ :type database_cluster_uuid: str
+ :param body: Is either a JSON type or a IO[bytes] type. Default value is None.
+ :type body: JSON or IO[bytes]
+ :return: JSON object
+ :rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
+ # JSON input template you can fill out and use as your body input.
+ body = {
+ "compatibility_level": "str" # The compatibility level of the schema
+ registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE",
+ "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE".
+ }
+
+ # response body for status code(s): 200
+ response == {
+ "compatibility_level": "str" # The compatibility level of the schema
+ registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE",
+ "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE".
+ }
# response body for status code(s): 404
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
@@ -118913,14 +126161,30 @@ async def delete_kafka_topic(
}
error_map.update(kwargs.pop("error_map", {}) or {})
- _headers = kwargs.pop("headers", {}) or {}
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = kwargs.pop("params", {}) or {}
- cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None)
+ content_type: Optional[str] = kwargs.pop(
+ "content_type", _headers.pop("Content-Type", None)
+ )
+ cls: ClsType[JSON] = kwargs.pop("cls", None)
- _request = build_databases_delete_kafka_topic_request(
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ if body is not None:
+ _json = body
+ else:
+ _json = None
+
+ _request = build_databases_update_kafka_schema_config_request(
database_cluster_uuid=database_cluster_uuid,
- topic_name=topic_name,
+ content_type=content_type,
+ json=_json,
+ content=_content,
headers=_headers,
params=_params,
)
@@ -118935,15 +126199,14 @@ async def delete_kafka_topic(
response = pipeline_response.http_response
- if response.status_code not in [204, 404]:
+ if response.status_code not in [200, 404]:
if _stream:
await response.read() # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
raise HttpResponseError(response=response)
- deserialized = None
response_headers = {}
- if response.status_code == 204:
+ if response.status_code == 200:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
)
@@ -118954,6 +126217,11 @@ async def delete_kafka_topic(
"int", response.headers.get("ratelimit-reset")
)
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
if response.status_code == 404:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
@@ -118971,20 +126239,27 @@ async def delete_kafka_topic(
deserialized = None
if cls:
- return cls(pipeline_response, deserialized, response_headers) # type: ignore
+ return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
- return deserialized # type: ignore
+ return cast(JSON, deserialized) # type: ignore
@distributed_trace_async
- async def list_logsink(self, database_cluster_uuid: str, **kwargs: Any) -> JSON:
+ async def get_kafka_schema_subject_config(
+ self, database_cluster_uuid: str, subject_name: str, **kwargs: Any
+ ) -> JSON:
# pylint: disable=line-too-long
- """List Logsinks for a Database Cluster.
+ """Retrieve Schema Registry Configuration for a Subject of kafka Cluster.
- To list logsinks for a database cluster, send a GET request to
- ``/v2/databases/$DATABASE_ID/logsink``.
+ To retrieve the Schema Registry configuration for a Subject of a Kafka cluster, send a GET
+ request to
+ ``/v2/databases/$DATABASE_ID/schema-registry/config/$SUBJECT_NAME``.
+ The response is a JSON object with a ``compatibility_level`` key, which is set to an object
+ containing any database configuration parameters.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
+ :param subject_name: The name of the Kafka schema subject. Required.
+ :type subject_name: str
:return: JSON object
:rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
@@ -118994,16 +126269,10 @@ async def list_logsink(self, database_cluster_uuid: str, **kwargs: Any) -> JSON:
# response body for status code(s): 200
response == {
- "sinks": [
- {
- "config": {},
- "sink_id": "str", # Optional. A unique identifier for
- Logsink.
- "sink_name": "str", # Optional. The name of the Logsink.
- "sink_type": "str" # Optional. Known values are: "rsyslog",
- "elasticsearch", and "opensearch".
- }
- ]
+ "compatibility_level": "str", # The compatibility level of the schema
+ registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE",
+ "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE".
+ "subject_name": "str" # The name of the schema subject. Required.
}
# response body for status code(s): 404
response == {
@@ -119035,8 +126304,9 @@ async def list_logsink(self, database_cluster_uuid: str, **kwargs: Any) -> JSON:
cls: ClsType[JSON] = kwargs.pop("cls", None)
- _request = build_databases_list_logsink_request(
+ _request = build_databases_get_kafka_schema_subject_config_request(
database_cluster_uuid=database_cluster_uuid,
+ subject_name=subject_name,
headers=_headers,
params=_params,
)
@@ -119096,23 +126366,29 @@ async def list_logsink(self, database_cluster_uuid: str, **kwargs: Any) -> JSON:
return cast(JSON, deserialized) # type: ignore
@overload
- async def create_logsink(
+ async def update_kafka_schema_subject_config(
self,
database_cluster_uuid: str,
- body: JSON,
+ subject_name: str,
+ body: Optional[JSON] = None,
*,
content_type: str = "application/json",
**kwargs: Any
) -> JSON:
# pylint: disable=line-too-long
- """Create Logsink for a Database Cluster.
+ """Update Schema Registry Configuration for a Subject of kafka Cluster.
- To create logsink for a database cluster, send a POST request to
- ``/v2/databases/$DATABASE_ID/logsink``.
+ To update the Schema Registry configuration for a Subject of a Kafka cluster, send a PUT
+ request to
+ ``/v2/databases/$DATABASE_ID/schema-registry/config/$SUBJECT_NAME``.
+ The response is a JSON object with a ``compatibility_level`` key, which is set to an object
+ containing any database configuration parameters.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
- :param body: Required.
+ :param subject_name: The name of the Kafka schema subject. Required.
+ :type subject_name: str
+ :param body: Default value is None.
:type body: JSON
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
@@ -119126,25 +126402,17 @@ async def create_logsink(
# JSON input template you can fill out and use as your body input.
body = {
- "config": {},
- "sink_name": "str", # Optional. The name of the Logsink.
- "sink_type": "str" # Optional. Type of logsink integration. * Use
- ``datadog`` for Datadog integration **only with MongoDB clusters**. * For
- non-MongoDB clusters, use ``rsyslog`` for general syslog forwarding. * Other
- supported types include ``elasticsearch`` and ``opensearch``. More details about
- the configuration can be found in the ``config`` property. Known values are:
- "rsyslog", "elasticsearch", "opensearch", and "datadog".
+ "compatibility_level": "str" # The compatibility level of the schema
+ registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE",
+ "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE".
}
- # response body for status code(s): 201
+ # response body for status code(s): 200
response == {
- "sink": {
- "config": {},
- "sink_id": "str", # Optional. A unique identifier for Logsink.
- "sink_name": "str", # Optional. The name of the Logsink.
- "sink_type": "str" # Optional. Known values are: "rsyslog",
- "elasticsearch", and "opensearch".
- }
+ "compatibility_level": "str", # The compatibility level of the schema
+ registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE",
+ "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE".
+ "subject_name": "str" # The name of the schema subject. Required.
}
# response body for status code(s): 404
response == {
@@ -119160,23 +126428,29 @@ async def create_logsink(
"""
@overload
- async def create_logsink(
+ async def update_kafka_schema_subject_config(
self,
database_cluster_uuid: str,
- body: IO[bytes],
+ subject_name: str,
+ body: Optional[IO[bytes]] = None,
*,
content_type: str = "application/json",
**kwargs: Any
) -> JSON:
# pylint: disable=line-too-long
- """Create Logsink for a Database Cluster.
+ """Update Schema Registry Configuration for a Subject of kafka Cluster.
- To create logsink for a database cluster, send a POST request to
- ``/v2/databases/$DATABASE_ID/logsink``.
+ To update the Schema Registry configuration for a Subject of a Kafka cluster, send a PUT
+ request to
+ ``/v2/databases/$DATABASE_ID/schema-registry/config/$SUBJECT_NAME``.
+ The response is a JSON object with a ``compatibility_level`` key, which is set to an object
+ containing any database configuration parameters.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
- :param body: Required.
+ :param subject_name: The name of the Kafka schema subject. Required.
+ :type subject_name: str
+ :param body: Default value is None.
:type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
@@ -119188,15 +126462,12 @@ async def create_logsink(
Example:
.. code-block:: python
- # response body for status code(s): 201
+ # response body for status code(s): 200
response == {
- "sink": {
- "config": {},
- "sink_id": "str", # Optional. A unique identifier for Logsink.
- "sink_name": "str", # Optional. The name of the Logsink.
- "sink_type": "str" # Optional. Known values are: "rsyslog",
- "elasticsearch", and "opensearch".
- }
+ "compatibility_level": "str", # The compatibility level of the schema
+ registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE",
+ "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE".
+ "subject_name": "str" # The name of the schema subject. Required.
}
# response body for status code(s): 404
response == {
@@ -119212,18 +126483,27 @@ async def create_logsink(
"""
@distributed_trace_async
- async def create_logsink(
- self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any
+ async def update_kafka_schema_subject_config(
+ self,
+ database_cluster_uuid: str,
+ subject_name: str,
+ body: Optional[Union[JSON, IO[bytes]]] = None,
+ **kwargs: Any
) -> JSON:
# pylint: disable=line-too-long
- """Create Logsink for a Database Cluster.
+ """Update Schema Registry Configuration for a Subject of kafka Cluster.
- To create logsink for a database cluster, send a POST request to
- ``/v2/databases/$DATABASE_ID/logsink``.
+ To update the Schema Registry configuration for a Subject of a Kafka cluster, send a PUT
+ request to
+ ``/v2/databases/$DATABASE_ID/schema-registry/config/$SUBJECT_NAME``.
+ The response is a JSON object with a ``compatibility_level`` key, which is set to an object
+ containing any database configuration parameters.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
- :param body: Is either a JSON type or a IO[bytes] type. Required.
+ :param subject_name: The name of the Kafka schema subject. Required.
+ :type subject_name: str
+ :param body: Is either a JSON type or a IO[bytes] type. Default value is None.
:type body: JSON or IO[bytes]
:return: JSON object
:rtype: JSON
@@ -119234,25 +126514,17 @@ async def create_logsink(
# JSON input template you can fill out and use as your body input.
body = {
- "config": {},
- "sink_name": "str", # Optional. The name of the Logsink.
- "sink_type": "str" # Optional. Type of logsink integration. * Use
- ``datadog`` for Datadog integration **only with MongoDB clusters**. * For
- non-MongoDB clusters, use ``rsyslog`` for general syslog forwarding. * Other
- supported types include ``elasticsearch`` and ``opensearch``. More details about
- the configuration can be found in the ``config`` property. Known values are:
- "rsyslog", "elasticsearch", "opensearch", and "datadog".
+ "compatibility_level": "str" # The compatibility level of the schema
+ registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE",
+ "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE".
}
- # response body for status code(s): 201
+ # response body for status code(s): 200
response == {
- "sink": {
- "config": {},
- "sink_id": "str", # Optional. A unique identifier for Logsink.
- "sink_name": "str", # Optional. The name of the Logsink.
- "sink_type": "str" # Optional. Known values are: "rsyslog",
- "elasticsearch", and "opensearch".
- }
+ "compatibility_level": "str", # The compatibility level of the schema
+ registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE",
+ "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE".
+ "subject_name": "str" # The name of the schema subject. Required.
}
# response body for status code(s): 404
response == {
@@ -119293,10 +126565,14 @@ async def create_logsink(
if isinstance(body, (IOBase, bytes)):
_content = body
else:
- _json = body
+ if body is not None:
+ _json = body
+ else:
+ _json = None
- _request = build_databases_create_logsink_request(
+ _request = build_databases_update_kafka_schema_subject_config_request(
database_cluster_uuid=database_cluster_uuid,
+ subject_name=subject_name,
content_type=content_type,
json=_json,
content=_content,
@@ -119314,14 +126590,14 @@ async def create_logsink(
response = pipeline_response.http_response
- if response.status_code not in [201, 404]:
+ if response.status_code not in [200, 404]:
if _stream:
await response.read() # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
raise HttpResponseError(response=response)
response_headers = {}
- if response.status_code == 201:
+ if response.status_code == 200:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
)
@@ -119359,19 +126635,14 @@ async def create_logsink(
return cast(JSON, deserialized) # type: ignore
@distributed_trace_async
- async def get_logsink(
- self, database_cluster_uuid: str, logsink_id: str, **kwargs: Any
- ) -> JSON:
+ async def get_cluster_metrics_credentials(self, **kwargs: Any) -> JSON:
# pylint: disable=line-too-long
- """Get Logsink for a Database Cluster.
+ """Retrieve Database Clusters' Metrics Endpoint Credentials.
- To get a logsink for a database cluster, send a GET request to
- ``/v2/databases/$DATABASE_ID/logsink/$LOGSINK_ID``.
+ To show the credentials for all database clusters' metrics endpoints, send a GET request to
+ ``/v2/databases/metrics/credentials``. The result will be a JSON object with a ``credentials``
+ key.
- :param database_cluster_uuid: A unique identifier for a database cluster. Required.
- :type database_cluster_uuid: str
- :param logsink_id: A unique identifier for a logsink of a database cluster. Required.
- :type logsink_id: str
:return: JSON object
:rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
@@ -119381,11 +126652,14 @@ async def get_logsink(
# response body for status code(s): 200
response == {
- "config": {},
- "sink_id": "str", # Optional. A unique identifier for Logsink.
- "sink_name": "str", # Optional. The name of the Logsink.
- "sink_type": "str" # Optional. Known values are: "rsyslog", "elasticsearch",
- and "opensearch".
+ "credentials": {
+ "credentials": {
+ "basic_auth_password": "str", # Optional. basic
+ authentication password for metrics HTTP endpoint.
+ "basic_auth_username": "str" # Optional. basic
+ authentication username for metrics HTTP endpoint.
+ }
+ }
}
# response body for status code(s): 404
response == {
@@ -119417,9 +126691,7 @@ async def get_logsink(
cls: ClsType[JSON] = kwargs.pop("cls", None)
- _request = build_databases_get_logsink_request(
- database_cluster_uuid=database_cluster_uuid,
- logsink_id=logsink_id,
+ _request = build_databases_get_cluster_metrics_credentials_request(
headers=_headers,
params=_params,
)
@@ -119479,32 +126751,26 @@ async def get_logsink(
return cast(JSON, deserialized) # type: ignore
@overload
- async def update_logsink(
+ async def update_cluster_metrics_credentials( # pylint: disable=inconsistent-return-statements
self,
- database_cluster_uuid: str,
- logsink_id: str,
- body: JSON,
+ body: Optional[JSON] = None,
*,
content_type: str = "application/json",
**kwargs: Any
- ) -> Optional[JSON]:
- # pylint: disable=line-too-long
- """Update Logsink for a Database Cluster.
+ ) -> None:
+ """Update Database Clusters' Metrics Endpoint Credentials.
- To update a logsink for a database cluster, send a PUT request to
- ``/v2/databases/$DATABASE_ID/logsink/$LOGSINK_ID``.
+ To update the credentials for all database clusters' metrics endpoints, send a PUT request to
+ ``/v2/databases/metrics/credentials``. A successful request will receive a 204 No Content
+ status code with no body in response.
- :param database_cluster_uuid: A unique identifier for a database cluster. Required.
- :type database_cluster_uuid: str
- :param logsink_id: A unique identifier for a logsink of a database cluster. Required.
- :type logsink_id: str
- :param body: Required.
+ :param body: Default value is None.
:type body: JSON
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :return: JSON object or None
- :rtype: JSON or None
+ :return: None
+ :rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
Example:
@@ -119512,99 +126778,179 @@ async def update_logsink(
# JSON input template you can fill out and use as your body input.
body = {
- "config": {}
- }
-
- # response body for status code(s): 404
- response == {
- "id": "str", # A short identifier corresponding to the HTTP status code
- returned. For example, the ID for a response returning a 404 status code would
- be "not_found.". Required.
- "message": "str", # A message providing additional information about the
- error, including details to help resolve it when possible. Required.
- "request_id": "str" # Optional. Optionally, some endpoints may include a
- request ID that should be provided when reporting bugs or opening support
- tickets to help identify the issue.
+ "credentials": {
+ "basic_auth_password": "str", # Optional. basic authentication
+ password for metrics HTTP endpoint.
+ "basic_auth_username": "str" # Optional. basic authentication
+ username for metrics HTTP endpoint.
+ }
}
"""
@overload
- async def update_logsink(
+ async def update_cluster_metrics_credentials( # pylint: disable=inconsistent-return-statements
self,
- database_cluster_uuid: str,
- logsink_id: str,
- body: IO[bytes],
+ body: Optional[IO[bytes]] = None,
*,
content_type: str = "application/json",
**kwargs: Any
- ) -> Optional[JSON]:
- # pylint: disable=line-too-long
- """Update Logsink for a Database Cluster.
+ ) -> None:
+ """Update Database Clusters' Metrics Endpoint Credentials.
- To update a logsink for a database cluster, send a PUT request to
- ``/v2/databases/$DATABASE_ID/logsink/$LOGSINK_ID``.
+ To update the credentials for all database clusters' metrics endpoints, send a PUT request to
+ ``/v2/databases/metrics/credentials``. A successful request will receive a 204 No Content
+ status code with no body in response.
- :param database_cluster_uuid: A unique identifier for a database cluster. Required.
- :type database_cluster_uuid: str
- :param logsink_id: A unique identifier for a logsink of a database cluster. Required.
- :type logsink_id: str
- :param body: Required.
+ :param body: Default value is None.
:type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :return: JSON object or None
- :rtype: JSON or None
+ :return: None
+ :rtype: None
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def update_cluster_metrics_credentials( # pylint: disable=inconsistent-return-statements
+ self, body: Optional[Union[JSON, IO[bytes]]] = None, **kwargs: Any
+ ) -> None:
+ """Update Database Clusters' Metrics Endpoint Credentials.
+
+ To update the credentials for all database clusters' metrics endpoints, send a PUT request to
+ ``/v2/databases/metrics/credentials``. A successful request will receive a 204 No Content
+ status code with no body in response.
+
+ :param body: Is either a JSON type or a IO[bytes] type. Default value is None.
+ :type body: JSON or IO[bytes]
+ :return: None
+ :rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
- # response body for status code(s): 404
- response == {
- "id": "str", # A short identifier corresponding to the HTTP status code
- returned. For example, the ID for a response returning a 404 status code would
- be "not_found.". Required.
- "message": "str", # A message providing additional information about the
- error, including details to help resolve it when possible. Required.
- "request_id": "str" # Optional. Optionally, some endpoints may include a
- request ID that should be provided when reporting bugs or opening support
- tickets to help identify the issue.
+ # JSON input template you can fill out and use as your body input.
+ body = {
+ "credentials": {
+ "basic_auth_password": "str", # Optional. basic authentication
+ password for metrics HTTP endpoint.
+ "basic_auth_username": "str" # Optional. basic authentication
+ username for metrics HTTP endpoint.
+ }
}
"""
+ error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ 401: cast(
+ Type[HttpResponseError],
+ lambda response: ClientAuthenticationError(response=response),
+ ),
+ 429: HttpResponseError,
+ 500: HttpResponseError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = kwargs.pop("params", {}) or {}
+
+ content_type: Optional[str] = kwargs.pop(
+ "content_type", _headers.pop("Content-Type", None)
+ )
+ cls: ClsType[None] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ if body is not None:
+ _json = body
+ else:
+ _json = None
+
+ _request = build_databases_update_cluster_metrics_credentials_request(
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = (
+ await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [204]:
+ if _stream:
+ await response.read() # Load the body in memory and close the socket
+ map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
+ raise HttpResponseError(response=response)
+
+ response_headers = {}
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if cls:
+ return cls(pipeline_response, None, response_headers) # type: ignore
@distributed_trace_async
- async def update_logsink(
- self,
- database_cluster_uuid: str,
- logsink_id: str,
- body: Union[JSON, IO[bytes]],
- **kwargs: Any
- ) -> Optional[JSON]:
+ async def list_opeasearch_indexes(
+ self, database_cluster_uuid: str, **kwargs: Any
+ ) -> JSON:
# pylint: disable=line-too-long
- """Update Logsink for a Database Cluster.
+ """List Indexes for a OpenSearch Cluster.
- To update a logsink for a database cluster, send a PUT request to
- ``/v2/databases/$DATABASE_ID/logsink/$LOGSINK_ID``.
+ To list all of a OpenSearch cluster's indexes, send a GET request to
+ ``/v2/databases/$DATABASE_ID/indexes``.
+
+ The result will be a JSON object with a ``indexes`` key.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
- :param logsink_id: A unique identifier for a logsink of a database cluster. Required.
- :type logsink_id: str
- :param body: Is either a JSON type or a IO[bytes] type. Required.
- :type body: JSON or IO[bytes]
- :return: JSON object or None
- :rtype: JSON or None
+ :return: JSON object
+ :rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
- # JSON input template you can fill out and use as your body input.
- body = {
- "config": {}
+ # response body for status code(s): 200
+ response == {
+ "indexes": [
+ {
+ "created_time": "2020-02-20 00:00:00", # Optional. The date
+ and time the index was created.
+ "health": "str", # Optional. The health of the OpenSearch
+ index. Known values are: "unknown", "green", "yellow", "red", and "red*".
+ "index_name": "str", # Optional. The name of the opensearch
+ index.
+ "number_of_replicas": 0, # Optional. The number of replicas
+ for the index.
+ "number_of_shards": 0, # Optional. The number of shards for
+ the index.
+ "size": 0, # Optional. The size of the index.
+ "status": "str" # Optional. The status of the OpenSearch
+ index. Known values are: "unknown", "open", "close", and "none".
+ }
+ ]
}
-
# response body for status code(s): 404
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
@@ -119630,28 +126976,13 @@ async def update_logsink(
}
error_map.update(kwargs.pop("error_map", {}) or {})
- _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _headers = kwargs.pop("headers", {}) or {}
_params = kwargs.pop("params", {}) or {}
- content_type: Optional[str] = kwargs.pop(
- "content_type", _headers.pop("Content-Type", None)
- )
- cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None)
-
- content_type = content_type or "application/json"
- _json = None
- _content = None
- if isinstance(body, (IOBase, bytes)):
- _content = body
- else:
- _json = body
+ cls: ClsType[JSON] = kwargs.pop("cls", None)
- _request = build_databases_update_logsink_request(
+ _request = build_databases_list_opeasearch_indexes_request(
database_cluster_uuid=database_cluster_uuid,
- logsink_id=logsink_id,
- content_type=content_type,
- json=_json,
- content=_content,
headers=_headers,
params=_params,
)
@@ -119672,7 +127003,6 @@ async def update_logsink(
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
raise HttpResponseError(response=response)
- deserialized = None
response_headers = {}
if response.status_code == 200:
response_headers["ratelimit-limit"] = self._deserialize(
@@ -119685,6 +127015,11 @@ async def update_logsink(
"int", response.headers.get("ratelimit-reset")
)
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
if response.status_code == 404:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
@@ -119702,24 +127037,27 @@ async def update_logsink(
deserialized = None
if cls:
- return cls(pipeline_response, deserialized, response_headers) # type: ignore
+ return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
- return deserialized # type: ignore
+ return cast(JSON, deserialized) # type: ignore
@distributed_trace_async
- async def delete_logsink(
- self, database_cluster_uuid: str, logsink_id: str, **kwargs: Any
+ async def delete_opensearch_index(
+ self, database_cluster_uuid: str, index_name: str, **kwargs: Any
) -> Optional[JSON]:
# pylint: disable=line-too-long
- """Delete Logsink for a Database Cluster.
+ """Delete Index for OpenSearch Cluster.
- To delete a logsink for a database cluster, send a DELETE request to
- ``/v2/databases/$DATABASE_ID/logsink/$LOGSINK_ID``.
+ To delete a single index within an OpenSearch cluster, send a DELETE request
+ to ``/v2/databases/$DATABASE_ID/indexes/$INDEX_NAME``.
+
+ A status of 204 will be given. This indicates that the request was
+ processed successfully, but that no response body is needed.
:param database_cluster_uuid: A unique identifier for a database cluster. Required.
:type database_cluster_uuid: str
- :param logsink_id: A unique identifier for a logsink of a database cluster. Required.
- :type logsink_id: str
+ :param index_name: The name of the OpenSearch index. Required.
+ :type index_name: str
:return: JSON object or None
:rtype: JSON or None
:raises ~azure.core.exceptions.HttpResponseError:
@@ -119757,9 +127095,9 @@ async def delete_logsink(
cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None)
- _request = build_databases_delete_logsink_request(
+ _request = build_databases_delete_opensearch_index_request(
database_cluster_uuid=database_cluster_uuid,
- logsink_id=logsink_id,
+ index_name=index_name,
headers=_headers,
params=_params,
)
@@ -119774,7 +127112,7 @@ async def delete_logsink(
response = pipeline_response.http_response
- if response.status_code not in [200, 404]:
+ if response.status_code not in [204, 404]:
if _stream:
await response.read() # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
@@ -119782,7 +127120,7 @@ async def delete_logsink(
deserialized = None
response_headers = {}
- if response.status_code == 200:
+ if response.status_code == 204:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
)
@@ -119814,18 +127152,38 @@ async def delete_logsink(
return deserialized # type: ignore
+
+class DedicatedInferencesOperations:
+ """
+ .. warning::
+ **DO NOT** instantiate this class directly.
+
+ Instead, you should access the following operations through
+ :class:`~pydo.aio.GeneratedClient`'s
+ :attr:`dedicated_inferences` attribute.
+ """
+
+ def __init__(self, *args, **kwargs) -> None:
+ input_args = list(args)
+ self._client = input_args.pop(0) if input_args else kwargs.pop("client")
+ self._config = input_args.pop(0) if input_args else kwargs.pop("config")
+ self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
+ self._deserialize = (
+ input_args.pop(0) if input_args else kwargs.pop("deserializer")
+ )
+
@distributed_trace_async
- async def list_kafka_schemas(
- self, database_cluster_uuid: str, **kwargs: Any
- ) -> JSON:
+ async def get(self, dedicated_inference_id: str, **kwargs: Any) -> JSON:
# pylint: disable=line-too-long
- """List Schemas for Kafka Cluster.
+ """Get a Dedicated Inference.
- To list all schemas for a Kafka cluster, send a GET request to
- ``/v2/databases/$DATABASE_ID/schema-registry``.
+ Retrieve an existing Dedicated Inference by ID. Send a GET request to
+ ``/v2/dedicated-inferences/{dedicated_inference_id}``. The status in the response
+ is one of active, new, provisioning, updating, deleting, or error.
- :param database_cluster_uuid: A unique identifier for a database cluster. Required.
- :type database_cluster_uuid: str
+ :param dedicated_inference_id: A unique identifier for a Dedicated Inference instance.
+ Required.
+ :type dedicated_inference_id: str
:return: JSON object
:rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
@@ -119835,17 +127193,109 @@ async def list_kafka_schemas(
# response body for status code(s): 200
response == {
- "subjects": [
- {
- "schema": "str", # Optional. The schema definition in the
- specified format.
- "schema_id": 0, # Optional. The id for schema.
- "schema_type": "str", # Optional. The type of the schema.
- Known values are: "AVRO", "JSON", and "PROTOBUF".
- "subject_name": "str" # Optional. The name of the schema
- subject.
- }
- ]
+ "dedicated_inference": {
+ "created_at": "2020-02-20 00:00:00", # Optional. When the Dedicated
+ Inference was created.
+ "endpoints": {
+ "private_endpoint_fqdn": "str", # Optional. Private VPC FQDN
+ of the Dedicated Inference instance.
+ "public_endpoint_fqdn": "str" # Optional. Public FQDN of the
+ Dedicated Inference instance.
+ },
+ "id": "str", # Optional. Unique ID of the Dedicated Inference.
+ "pending_deployment_spec": {
+ "created_at": "2020-02-20 00:00:00", # Optional. Pending
+ deployment when status is provisioning or updating.
+ "enable_public_endpoint": bool, # Optional. Whether to
+ expose a public LLM endpoint.
+ "id": "str", # Optional. Deployment UUID.
+ "model_deployments": [
+ {
+ "accelerators": [
+ {
+ "accelerator_slug": "str", #
+ DigitalOcean GPU slug. Required.
+ "scale": 0, # Number of
+ accelerator instances. Required.
+ "type": "str", # Accelerator
+ type (e.g. prefill_decode). Required.
+ "status": "str" # Optional.
+ Current state of the Accelerator. Known values are:
+ "new", "provisioning", and "active".
+ }
+ ],
+ "model_id": "str", # Optional. Used to
+ identify an existing deployment when updating; empty means create
+ new.
+ "model_provider": "str", # Optional. Model
+ provider. "hugging_face"
+ "model_slug": "str", # Optional. Model
+ identifier (e.g. Hugging Face slug).
+ "workload_config": {} # Optional.
+ Workload-specific configuration (e.g. ISL/OSL in future).
+ }
+ ],
+ "name": "str", # Optional. Name of the Dedicated Inference.
+ Must be unique within the team.
+ "status": "str", # Optional. Known values are:
+ "provisioning" and "updating".
+ "updated_at": "2020-02-20 00:00:00", # Optional. Pending
+ deployment when status is provisioning or updating.
+ "version": 0, # Optional. Spec version.
+ "vpc": {
+ "uuid": "str" # VPC UUID for the Dedicated
+ Inference. Required.
+ }
+ },
+ "region": "str", # Optional. DigitalOcean region where the Dedicated
+ Inference is hosted.
+ "spec": {
+ "enable_public_endpoint": bool, # Whether to expose a public
+ LLM endpoint. Required.
+ "model_deployments": [
+ {
+ "accelerators": [
+ {
+ "accelerator_slug": "str", #
+ DigitalOcean GPU slug. Required.
+ "scale": 0, # Number of
+ accelerator instances. Required.
+ "type": "str", # Accelerator
+ type (e.g. prefill_decode). Required.
+ "status": "str" # Optional.
+ Current state of the Accelerator. Known values are:
+ "new", "provisioning", and "active".
+ }
+ ],
+ "model_id": "str", # Optional. Used to
+ identify an existing deployment when updating; empty means create
+ new.
+ "model_provider": "str", # Optional. Model
+ provider. "hugging_face"
+ "model_slug": "str", # Optional. Model
+ identifier (e.g. Hugging Face slug).
+ "workload_config": {} # Optional.
+ Workload-specific configuration (e.g. ISL/OSL in future).
+ }
+ ],
+ "name": "str", # Name of the Dedicated Inference. Must be
+ unique within the team. Required.
+ "region": "str", # DigitalOcean region where the Dedicated
+ Inference is hosted. Required. Known values are: "atl1", "nyc2", and
+ "tor1".
+ "version": 0, # Spec version. Required.
+ "vpc": {
+ "uuid": "str" # VPC UUID for the Dedicated
+ Inference. Required.
+ }
+ },
+ "status": "str", # Optional. Current state of the Dedicated
+ Inference. Known values are: "active", "new", "provisioning", "updating",
+ "deleting", and "error".
+ "updated_at": "2020-02-20 00:00:00", # Optional. When the Dedicated
+ Inference was last updated.
+ "vpc_uuid": "str" # Optional. VPC UUID of the Dedicated Inference.
+ }
}
# response body for status code(s): 404
response == {
@@ -119877,8 +127327,8 @@ async def list_kafka_schemas(
cls: ClsType[JSON] = kwargs.pop("cls", None)
- _request = build_databases_list_kafka_schemas_request(
- database_cluster_uuid=database_cluster_uuid,
+ _request = build_dedicated_inferences_get_request(
+ dedicated_inference_id=dedicated_inference_id,
headers=_headers,
params=_params,
)
@@ -119938,22 +127388,24 @@ async def list_kafka_schemas(
return cast(JSON, deserialized) # type: ignore
@overload
- async def create_kafka_schema(
+ async def patch(
self,
- database_cluster_uuid: str,
+ dedicated_inference_id: str,
body: JSON,
*,
content_type: str = "application/json",
**kwargs: Any
) -> JSON:
# pylint: disable=line-too-long
- """Create Schema Registry for Kafka Cluster.
+ """Update a Dedicated Inference.
- To create a Kafka schema for a database cluster, send a POST request to
- ``/v2/databases/$DATABASE_ID/schema-registry``.
+ Update an existing Dedicated Inference. Send a PATCH request to
+ ``/v2/dedicated-inferences/{dedicated_inference_id}`` with updated ``spec`` and/or
+ ``access_tokens``. Status will move to updating and return to active when done.
- :param database_cluster_uuid: A unique identifier for a database cluster. Required.
- :type database_cluster_uuid: str
+ :param dedicated_inference_id: A unique identifier for a Dedicated Inference instance.
+ Required.
+ :type dedicated_inference_id: str
:param body: Required.
:type body: JSON
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
@@ -119968,19 +127420,155 @@ async def create_kafka_schema(
# JSON input template you can fill out and use as your body input.
body = {
- "schema": "str", # Optional. The schema definition in the specified format.
- "schema_type": "str", # Optional. The type of the schema. Known values are:
- "AVRO", "JSON", and "PROTOBUF".
- "subject_name": "str" # Optional. The name of the schema subject.
+ "access_tokens": {
+ "hugging_face_token": "str" # Optional. Hugging Face token required
+ for gated models.
+ },
+ "spec": {
+ "enable_public_endpoint": bool, # Whether to expose a public LLM
+ endpoint. Required.
+ "model_deployments": [
+ {
+ "accelerators": [
+ {
+ "accelerator_slug": "str", #
+ DigitalOcean GPU slug. Required.
+ "scale": 0, # Number of accelerator
+ instances. Required.
+ "type": "str", # Accelerator type
+ (e.g. prefill_decode). Required.
+ "status": "str" # Optional. Current
+ state of the Accelerator. Known values are: "new",
+ "provisioning", and "active".
+ }
+ ],
+ "model_id": "str", # Optional. Used to identify an
+ existing deployment when updating; empty means create new.
+ "model_provider": "str", # Optional. Model provider.
+ "hugging_face"
+ "model_slug": "str", # Optional. Model identifier
+ (e.g. Hugging Face slug).
+ "workload_config": {} # Optional. Workload-specific
+ configuration (e.g. ISL/OSL in future).
+ }
+ ],
+ "name": "str", # Name of the Dedicated Inference. Must be unique
+ within the team. Required.
+ "region": "str", # DigitalOcean region where the Dedicated Inference
+ is hosted. Required. Known values are: "atl1", "nyc2", and "tor1".
+ "version": 0, # Spec version. Required.
+ "vpc": {
+ "uuid": "str" # VPC UUID for the Dedicated Inference.
+ Required.
+ }
+ }
}
- # response body for status code(s): 201
+ # response body for status code(s): 202
response == {
- "schema": "str", # Optional. The schema definition in the specified format.
- "schema_id": 0, # Optional. The id for schema.
- "schema_type": "str", # Optional. The type of the schema. Known values are:
- "AVRO", "JSON", and "PROTOBUF".
- "subject_name": "str" # Optional. The name of the schema subject.
+ "dedicated_inference": {
+ "created_at": "2020-02-20 00:00:00", # Optional. When the Dedicated
+ Inference was created.
+ "endpoints": {
+ "private_endpoint_fqdn": "str", # Optional. Private VPC FQDN
+ of the Dedicated Inference instance.
+ "public_endpoint_fqdn": "str" # Optional. Public FQDN of the
+ Dedicated Inference instance.
+ },
+ "id": "str", # Optional. Unique ID of the Dedicated Inference.
+ "pending_deployment_spec": {
+ "created_at": "2020-02-20 00:00:00", # Optional. Pending
+ deployment when status is provisioning or updating.
+ "enable_public_endpoint": bool, # Optional. Whether to
+ expose a public LLM endpoint.
+ "id": "str", # Optional. Deployment UUID.
+ "model_deployments": [
+ {
+ "accelerators": [
+ {
+ "accelerator_slug": "str", #
+ DigitalOcean GPU slug. Required.
+ "scale": 0, # Number of
+ accelerator instances. Required.
+ "type": "str", # Accelerator
+ type (e.g. prefill_decode). Required.
+ "status": "str" # Optional.
+ Current state of the Accelerator. Known values are:
+ "new", "provisioning", and "active".
+ }
+ ],
+ "model_id": "str", # Optional. Used to
+ identify an existing deployment when updating; empty means create
+ new.
+ "model_provider": "str", # Optional. Model
+ provider. "hugging_face"
+ "model_slug": "str", # Optional. Model
+ identifier (e.g. Hugging Face slug).
+ "workload_config": {} # Optional.
+ Workload-specific configuration (e.g. ISL/OSL in future).
+ }
+ ],
+ "name": "str", # Optional. Name of the Dedicated Inference.
+ Must be unique within the team.
+ "status": "str", # Optional. Known values are:
+ "provisioning" and "updating".
+ "updated_at": "2020-02-20 00:00:00", # Optional. Pending
+ deployment when status is provisioning or updating.
+ "version": 0, # Optional. Spec version.
+ "vpc": {
+ "uuid": "str" # VPC UUID for the Dedicated
+ Inference. Required.
+ }
+ },
+ "region": "str", # Optional. DigitalOcean region where the Dedicated
+ Inference is hosted.
+ "spec": {
+ "enable_public_endpoint": bool, # Whether to expose a public
+ LLM endpoint. Required.
+ "model_deployments": [
+ {
+ "accelerators": [
+ {
+ "accelerator_slug": "str", #
+ DigitalOcean GPU slug. Required.
+ "scale": 0, # Number of
+ accelerator instances. Required.
+ "type": "str", # Accelerator
+ type (e.g. prefill_decode). Required.
+ "status": "str" # Optional.
+ Current state of the Accelerator. Known values are:
+ "new", "provisioning", and "active".
+ }
+ ],
+ "model_id": "str", # Optional. Used to
+ identify an existing deployment when updating; empty means create
+ new.
+ "model_provider": "str", # Optional. Model
+ provider. "hugging_face"
+ "model_slug": "str", # Optional. Model
+ identifier (e.g. Hugging Face slug).
+ "workload_config": {} # Optional.
+ Workload-specific configuration (e.g. ISL/OSL in future).
+ }
+ ],
+ "name": "str", # Name of the Dedicated Inference. Must be
+ unique within the team. Required.
+ "region": "str", # DigitalOcean region where the Dedicated
+ Inference is hosted. Required. Known values are: "atl1", "nyc2", and
+ "tor1".
+ "version": 0, # Spec version. Required.
+ "vpc": {
+ "uuid": "str" # VPC UUID for the Dedicated
+ Inference. Required.
+ }
+ },
+ "status": "str", # Optional. Current state of the Dedicated
+ Inference. Known values are: "active", "new", "provisioning", "updating",
+ "deleting", and "error".
+ "updated_at": "2020-02-20 00:00:00", # Optional. When the Dedicated
+ Inference was last updated.
+ "vpc_uuid": "str" # Optional. VPC UUID of the Dedicated Inference.
+ }
}
# response body for status code(s): 404
response == {
@@ -119996,22 +127584,24 @@ async def create_kafka_schema(
"""
@overload
- async def create_kafka_schema(
+ async def patch(
self,
- database_cluster_uuid: str,
+ dedicated_inference_id: str,
body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
) -> JSON:
# pylint: disable=line-too-long
- """Create Schema Registry for Kafka Cluster.
+ """Update a Dedicated Inference.
- To create a Kafka schema for a database cluster, send a POST request to
- ``/v2/databases/$DATABASE_ID/schema-registry``.
+ Update an existing Dedicated Inference. Send a PATCH request to
+ ``/v2/dedicated-inferences/{dedicated_inference_id}`` with updated ``spec`` and/or
+ ``access_tokens``. Status will move to updating and return to active when done.
- :param database_cluster_uuid: A unique identifier for a database cluster. Required.
- :type database_cluster_uuid: str
+ :param dedicated_inference_id: A unique identifier for a Dedicated Inference instance.
+ Required.
+ :type dedicated_inference_id: str
:param body: Required.
:type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
@@ -120024,13 +127614,111 @@ async def create_kafka_schema(
Example:
.. code-block:: python
- # response body for status code(s): 201
+ # response body for status code(s): 202
response == {
- "schema": "str", # Optional. The schema definition in the specified format.
- "schema_id": 0, # Optional. The id for schema.
- "schema_type": "str", # Optional. The type of the schema. Known values are:
- "AVRO", "JSON", and "PROTOBUF".
- "subject_name": "str" # Optional. The name of the schema subject.
+ "dedicated_inference": {
+ "created_at": "2020-02-20 00:00:00", # Optional. When the Dedicated
+ Inference was created.
+ "endpoints": {
+ "private_endpoint_fqdn": "str", # Optional. Private VPC FQDN
+ of the Dedicated Inference instance.
+ "public_endpoint_fqdn": "str" # Optional. Public FQDN of the
+ Dedicated Inference instance.
+ },
+ "id": "str", # Optional. Unique ID of the Dedicated Inference.
+ "pending_deployment_spec": {
+ "created_at": "2020-02-20 00:00:00", # Optional. Pending
+ deployment when status is provisioning or updating.
+ "enable_public_endpoint": bool, # Optional. Whether to
+ expose a public LLM endpoint.
+ "id": "str", # Optional. Deployment UUID.
+ "model_deployments": [
+ {
+ "accelerators": [
+ {
+ "accelerator_slug": "str", #
+ DigitalOcean GPU slug. Required.
+ "scale": 0, # Number of
+ accelerator instances. Required.
+ "type": "str", # Accelerator
+ type (e.g. prefill_decode). Required.
+ "status": "str" # Optional.
+ Current state of the Accelerator. Known values are:
+ "new", "provisioning", and "active".
+ }
+ ],
+ "model_id": "str", # Optional. Used to
+ identify an existing deployment when updating; empty means create
+ new.
+ "model_provider": "str", # Optional. Model
+ provider. "hugging_face"
+ "model_slug": "str", # Optional. Model
+ identifier (e.g. Hugging Face slug).
+ "workload_config": {} # Optional.
+ Workload-specific configuration (e.g. ISL/OSL in future).
+ }
+ ],
+ "name": "str", # Optional. Name of the Dedicated Inference.
+ Must be unique within the team.
+ "status": "str", # Optional. Known values are:
+ "provisioning" and "updating".
+ "updated_at": "2020-02-20 00:00:00", # Optional. Pending
+ deployment when status is provisioning or updating.
+ "version": 0, # Optional. Spec version.
+ "vpc": {
+ "uuid": "str" # VPC UUID for the Dedicated
+ Inference. Required.
+ }
+ },
+ "region": "str", # Optional. DigitalOcean region where the Dedicated
+ Inference is hosted.
+ "spec": {
+ "enable_public_endpoint": bool, # Whether to expose a public
+ LLM endpoint. Required.
+ "model_deployments": [
+ {
+ "accelerators": [
+ {
+ "accelerator_slug": "str", #
+ DigitalOcean GPU slug. Required.
+ "scale": 0, # Number of
+ accelerator instances. Required.
+ "type": "str", # Accelerator
+ type (e.g. prefill_decode). Required.
+ "status": "str" # Optional.
+ Current state of the Accelerator. Known values are:
+ "new", "provisioning", and "active".
+ }
+ ],
+ "model_id": "str", # Optional. Used to
+ identify an existing deployment when updating; empty means create
+ new.
+ "model_provider": "str", # Optional. Model
+ provider. "hugging_face"
+ "model_slug": "str", # Optional. Model
+ identifier (e.g. Hugging Face slug).
+ "workload_config": {} # Optional.
+ Workload-specific configuration (e.g. ISL/OSL in future).
+ }
+ ],
+ "name": "str", # Name of the Dedicated Inference. Must be
+ unique within the team. Required.
+ "region": "str", # DigitalOcean region where the Dedicated
+ Inference is hosted. Required. Known values are: "atl1", "nyc2", and
+ "tor1".
+ "version": 0, # Spec version. Required.
+ "vpc": {
+ "uuid": "str" # VPC UUID for the Dedicated
+ Inference. Required.
+ }
+ },
+ "status": "str", # Optional. Current state of the Dedicated
+ Inference. Known values are: "active", "new", "provisioning", "updating",
+ "deleting", and "error".
+ "updated_at": "2020-02-20 00:00:00", # Optional. When the Dedicated
+ Inference was last updated.
+ "vpc_uuid": "str" # Optional. VPC UUID of the Dedicated Inference.
+ }
}
# response body for status code(s): 404
response == {
@@ -120046,17 +127734,19 @@ async def create_kafka_schema(
"""
@distributed_trace_async
- async def create_kafka_schema(
- self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any
+ async def patch(
+ self, dedicated_inference_id: str, body: Union[JSON, IO[bytes]], **kwargs: Any
) -> JSON:
# pylint: disable=line-too-long
- """Create Schema Registry for Kafka Cluster.
+ """Update a Dedicated Inference.
- To create a Kafka schema for a database cluster, send a POST request to
- ``/v2/databases/$DATABASE_ID/schema-registry``.
+ Update an existing Dedicated Inference. Send a PATCH request to
+ ``/v2/dedicated-inferences/{dedicated_inference_id}`` with updated ``spec`` and/or
+ ``access_tokens``. Status will move to updating and return to active when done.
- :param database_cluster_uuid: A unique identifier for a database cluster. Required.
- :type database_cluster_uuid: str
+ :param dedicated_inference_id: A unique identifier for a Dedicated Inference instance.
+ Required.
+ :type dedicated_inference_id: str
:param body: Is either a JSON type or a IO[bytes] type. Required.
:type body: JSON or IO[bytes]
:return: JSON object
@@ -120068,19 +127758,155 @@ async def create_kafka_schema(
# JSON input template you can fill out and use as your body input.
body = {
- "schema": "str", # Optional. The schema definition in the specified format.
- "schema_type": "str", # Optional. The type of the schema. Known values are:
- "AVRO", "JSON", and "PROTOBUF".
- "subject_name": "str" # Optional. The name of the schema subject.
+ "access_tokens": {
+ "hugging_face_token": "str" # Optional. Hugging Face token required
+ for gated models.
+ },
+ "spec": {
+ "enable_public_endpoint": bool, # Whether to expose a public LLM
+ endpoint. Required.
+ "model_deployments": [
+ {
+ "accelerators": [
+ {
+ "accelerator_slug": "str", #
+ DigitalOcean GPU slug. Required.
+ "scale": 0, # Number of accelerator
+ instances. Required.
+ "type": "str", # Accelerator type
+ (e.g. prefill_decode). Required.
+ "status": "str" # Optional. Current
+ state of the Accelerator. Known values are: "new",
+ "provisioning", and "active".
+ }
+ ],
+ "model_id": "str", # Optional. Used to identify an
+ existing deployment when updating; empty means create new.
+ "model_provider": "str", # Optional. Model provider.
+ "hugging_face"
+ "model_slug": "str", # Optional. Model identifier
+ (e.g. Hugging Face slug).
+ "workload_config": {} # Optional. Workload-specific
+ configuration (e.g. ISL/OSL in future).
+ }
+ ],
+ "name": "str", # Name of the Dedicated Inference. Must be unique
+ within the team. Required.
+ "region": "str", # DigitalOcean region where the Dedicated Inference
+ is hosted. Required. Known values are: "atl1", "nyc2", and "tor1".
+ "version": 0, # Spec version. Required.
+ "vpc": {
+ "uuid": "str" # VPC UUID for the Dedicated Inference.
+ Required.
+ }
+ }
}
- # response body for status code(s): 201
+ # response body for status code(s): 202
response == {
- "schema": "str", # Optional. The schema definition in the specified format.
- "schema_id": 0, # Optional. The id for schema.
- "schema_type": "str", # Optional. The type of the schema. Known values are:
- "AVRO", "JSON", and "PROTOBUF".
- "subject_name": "str" # Optional. The name of the schema subject.
+ "dedicated_inference": {
+ "created_at": "2020-02-20 00:00:00", # Optional. When the Dedicated
+ Inference was created.
+ "endpoints": {
+ "private_endpoint_fqdn": "str", # Optional. Private VPC FQDN
+ of the Dedicated Inference instance.
+ "public_endpoint_fqdn": "str" # Optional. Public FQDN of the
+ Dedicated Inference instance.
+ },
+ "id": "str", # Optional. Unique ID of the Dedicated Inference.
+ "pending_deployment_spec": {
+ "created_at": "2020-02-20 00:00:00", # Optional. Pending
+ deployment when status is provisioning or updating.
+ "enable_public_endpoint": bool, # Optional. Whether to
+ expose a public LLM endpoint.
+ "id": "str", # Optional. Deployment UUID.
+ "model_deployments": [
+ {
+ "accelerators": [
+ {
+ "accelerator_slug": "str", #
+ DigitalOcean GPU slug. Required.
+ "scale": 0, # Number of
+ accelerator instances. Required.
+ "type": "str", # Accelerator
+ type (e.g. prefill_decode). Required.
+ "status": "str" # Optional.
+ Current state of the Accelerator. Known values are:
+ "new", "provisioning", and "active".
+ }
+ ],
+ "model_id": "str", # Optional. Used to
+ identify an existing deployment when updating; empty means create
+ new.
+ "model_provider": "str", # Optional. Model
+ provider. "hugging_face"
+ "model_slug": "str", # Optional. Model
+ identifier (e.g. Hugging Face slug).
+ "workload_config": {} # Optional.
+ Workload-specific configuration (e.g. ISL/OSL in future).
+ }
+ ],
+ "name": "str", # Optional. Name of the Dedicated Inference.
+ Must be unique within the team.
+ "status": "str", # Optional. Known values are:
+ "provisioning" and "updating".
+ "updated_at": "2020-02-20 00:00:00", # Optional. Pending
+ deployment when status is provisioning or updating.
+ "version": 0, # Optional. Spec version.
+ "vpc": {
+ "uuid": "str" # VPC UUID for the Dedicated
+ Inference. Required.
+ }
+ },
+ "region": "str", # Optional. DigitalOcean region where the Dedicated
+ Inference is hosted.
+ "spec": {
+ "enable_public_endpoint": bool, # Whether to expose a public
+ LLM endpoint. Required.
+ "model_deployments": [
+ {
+ "accelerators": [
+ {
+ "accelerator_slug": "str", #
+ DigitalOcean GPU slug. Required.
+ "scale": 0, # Number of
+ accelerator instances. Required.
+ "type": "str", # Accelerator
+ type (e.g. prefill_decode). Required.
+ "status": "str" # Optional.
+ Current state of the Accelerator. Known values are:
+ "new", "provisioning", and "active".
+ }
+ ],
+ "model_id": "str", # Optional. Used to
+ identify an existing deployment when updating; empty means create
+ new.
+ "model_provider": "str", # Optional. Model
+ provider. "hugging_face"
+ "model_slug": "str", # Optional. Model
+ identifier (e.g. Hugging Face slug).
+ "workload_config": {} # Optional.
+ Workload-specific configuration (e.g. ISL/OSL in future).
+ }
+ ],
+ "name": "str", # Name of the Dedicated Inference. Must be
+ unique within the team. Required.
+ "region": "str", # DigitalOcean region where the Dedicated
+ Inference is hosted. Required. Known values are: "atl1", "nyc2", and
+ "tor1".
+ "version": 0, # Spec version. Required.
+ "vpc": {
+ "uuid": "str" # VPC UUID for the Dedicated
+ Inference. Required.
+ }
+ },
+ "status": "str", # Optional. Current state of the Dedicated
+ Inference. Known values are: "active", "new", "provisioning", "updating",
+ "deleting", and "error".
+ "updated_at": "2020-02-20 00:00:00", # Optional. When the Dedicated
+ Inference was last updated.
+ "vpc_uuid": "str" # Optional. VPC UUID of the Dedicated Inference.
+ }
}
# response body for status code(s): 404
response == {
@@ -120123,8 +127949,8 @@ async def create_kafka_schema(
else:
_json = body
- _request = build_databases_create_kafka_schema_request(
- database_cluster_uuid=database_cluster_uuid,
+ _request = build_dedicated_inferences_patch_request(
+ dedicated_inference_id=dedicated_inference_id,
content_type=content_type,
json=_json,
content=_content,
@@ -120142,14 +127968,126 @@ async def create_kafka_schema(
response = pipeline_response.http_response
- if response.status_code not in [201, 404]:
+ if response.status_code not in [202, 404]:
+ if _stream:
+ await response.read() # Load the body in memory and close the socket
+ map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
+ raise HttpResponseError(response=response)
+
+ response_headers = {}
+ if response.status_code == 202:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if response.status_code == 404:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if cls:
+ return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
+
+ return cast(JSON, deserialized) # type: ignore
+
+ @distributed_trace_async
+ async def delete(
+ self, dedicated_inference_id: str, **kwargs: Any
+ ) -> Optional[JSON]:
+ # pylint: disable=line-too-long
+ """Delete a Dedicated Inference.
+
+ Delete an existing Dedicated Inference. Send a DELETE request to
+ ``/v2/dedicated-inferences/{dedicated_inference_id}``. The response 202 Accepted
+ indicates the request was accepted for processing.
+
+ :param dedicated_inference_id: A unique identifier for a Dedicated Inference instance.
+ Required.
+ :type dedicated_inference_id: str
+ :return: JSON object or None
+ :rtype: JSON or None
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # response body for status code(s): 404
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
+ """
+ error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ 401: cast(
+ Type[HttpResponseError],
+ lambda response: ClientAuthenticationError(response=response),
+ ),
+ 429: HttpResponseError,
+ 500: HttpResponseError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = kwargs.pop("params", {}) or {}
+
+ cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None)
+
+ _request = build_dedicated_inferences_delete_request(
+ dedicated_inference_id=dedicated_inference_id,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = (
+ await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [202, 404]:
if _stream:
await response.read() # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
raise HttpResponseError(response=response)
+ deserialized = None
response_headers = {}
- if response.status_code == 201:
+ if response.status_code == 202:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
)
@@ -120160,11 +128098,6 @@ async def create_kafka_schema(
"int", response.headers.get("ratelimit-reset")
)
- if response.content:
- deserialized = response.json()
- else:
- deserialized = None
-
if response.status_code == 404:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
@@ -120182,24 +128115,33 @@ async def create_kafka_schema(
deserialized = None
if cls:
- return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- return cast(JSON, deserialized) # type: ignore
+ return deserialized # type: ignore
@distributed_trace_async
- async def get_kafka_schema(
- self, database_cluster_uuid: str, subject_name: str, **kwargs: Any
+ async def list(
+ self,
+ *,
+ per_page: int = 20,
+ page: int = 1,
+ region: Optional[str] = None,
+ **kwargs: Any
) -> JSON:
# pylint: disable=line-too-long
- """Get a Kafka Schema by Subject Name.
+ """List Dedicated Inferences.
- To get a specific schema by subject name for a Kafka cluster, send a GET request to
- ``/v2/databases/$DATABASE_ID/schema-registry/$SUBJECT_NAME``.
+ List all Dedicated Inference instances for your team. Send a GET request to
+ ``/v2/dedicated-inferences``. You may filter by region and use page and per_page
+ for pagination.
- :param database_cluster_uuid: A unique identifier for a database cluster. Required.
- :type database_cluster_uuid: str
- :param subject_name: The name of the Kafka schema subject. Required.
- :type subject_name: str
+ :keyword per_page: Number of items returned per page. Default value is 20.
+ :paramtype per_page: int
+ :keyword page: Which 'page' of paginated results to return. Default value is 1.
+ :paramtype page: int
+ :keyword region: Filter by region. Dedicated Inference is only available in nyc2, tor1, and
+ atl1. Known values are: "nyc2", "tor1", and "atl1". Default value is None.
+ :paramtype region: str
:return: JSON object
:rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
@@ -120209,23 +128151,122 @@ async def get_kafka_schema(
# response body for status code(s): 200
response == {
- "schema": "str", # Optional. The schema definition in the specified format.
- "schema_id": 0, # Optional. The id for schema.
- "schema_type": "str", # Optional. The type of the schema. Known values are:
- "AVRO", "JSON", and "PROTOBUF".
- "subject_name": "str", # Optional. The name of the schema subject.
- "version": "str" # Optional. The version of the schema.
- }
- # response body for status code(s): 404
- response == {
- "id": "str", # A short identifier corresponding to the HTTP status code
- returned. For example, the ID for a response returning a 404 status code would
- be "not_found.". Required.
- "message": "str", # A message providing additional information about the
- error, including details to help resolve it when possible. Required.
- "request_id": "str" # Optional. Optionally, some endpoints may include a
- request ID that should be provided when reporting bugs or opening support
- tickets to help identify the issue.
+ "dedicated_inferences": [
+ {
+ "created_at": "2020-02-20 00:00:00", # Optional. When the
+ Dedicated Inference was created.
+ "endpoints": {
+ "private_endpoint_fqdn": "str", # Optional. Private
+ VPC FQDN of the Dedicated Inference instance.
+ "public_endpoint_fqdn": "str" # Optional. Public
+ FQDN of the Dedicated Inference instance.
+ },
+ "id": "str", # Optional. Unique ID of the Dedicated
+ Inference.
+ "pending_deployment_spec": {
+ "created_at": "2020-02-20 00:00:00", # Optional.
+ Pending deployment when status is provisioning or updating.
+ "enable_public_endpoint": bool, # Optional. Whether
+ to expose a public LLM endpoint.
+ "id": "str", # Optional. Deployment UUID.
+ "model_deployments": [
+ {
+ "accelerators": [
+ {
+ "accelerator_slug":
+ "str", # DigitalOcean GPU slug. Required.
+ "scale": 0, # Number
+ of accelerator instances. Required.
+ "type": "str", #
+ Accelerator type (e.g. prefill_decode). Required.
+ "status": "str" #
+ Optional. Current state of the Accelerator. Known
+ values are: "new", "provisioning", and "active".
+ }
+ ],
+ "model_id": "str", # Optional. Used
+ to identify an existing deployment when updating; empty means
+ create new.
+ "model_provider": "str", # Optional.
+ Model provider. "hugging_face"
+ "model_slug": "str", # Optional.
+ Model identifier (e.g. Hugging Face slug).
+ "workload_config": {} # Optional.
+ Workload-specific configuration (e.g. ISL/OSL in future).
+ }
+ ],
+ "name": "str", # Optional. Name of the Dedicated
+ Inference. Must be unique within the team.
+ "status": "str", # Optional. Known values are:
+ "provisioning" and "updating".
+ "updated_at": "2020-02-20 00:00:00", # Optional.
+ Pending deployment when status is provisioning or updating.
+ "version": 0, # Optional. Spec version.
+ "vpc": {
+ "uuid": "str" # VPC UUID for the Dedicated
+ Inference. Required.
+ }
+ },
+ "region": "str", # Optional. DigitalOcean region where the
+ Dedicated Inference is hosted.
+ "spec": {
+ "enable_public_endpoint": bool, # Whether to expose
+ a public LLM endpoint. Required.
+ "model_deployments": [
+ {
+ "accelerators": [
+ {
+ "accelerator_slug":
+ "str", # DigitalOcean GPU slug. Required.
+ "scale": 0, # Number
+ of accelerator instances. Required.
+ "type": "str", #
+ Accelerator type (e.g. prefill_decode). Required.
+ "status": "str" #
+ Optional. Current state of the Accelerator. Known
+ values are: "new", "provisioning", and "active".
+ }
+ ],
+ "model_id": "str", # Optional. Used
+ to identify an existing deployment when updating; empty means
+ create new.
+ "model_provider": "str", # Optional.
+ Model provider. "hugging_face"
+ "model_slug": "str", # Optional.
+ Model identifier (e.g. Hugging Face slug).
+ "workload_config": {} # Optional.
+ Workload-specific configuration (e.g. ISL/OSL in future).
+ }
+ ],
+ "name": "str", # Name of the Dedicated Inference.
+ Must be unique within the team. Required.
+ "region": "str", # DigitalOcean region where the
+ Dedicated Inference is hosted. Required. Known values are: "atl1",
+ "nyc2", and "tor1".
+ "version": 0, # Spec version. Required.
+ "vpc": {
+ "uuid": "str" # VPC UUID for the Dedicated
+ Inference. Required.
+ }
+ },
+ "status": "str", # Optional. Current state of the Dedicated
+ Inference. Known values are: "active", "new", "provisioning", "updating",
+ "deleting", and "error".
+ "updated_at": "2020-02-20 00:00:00", # Optional. When the
+ Dedicated Inference was last updated.
+ "vpc_uuid": "str" # Optional. VPC UUID of the Dedicated
+ Inference.
+ }
+ ],
+ "links": {
+ "pages": {
+ "str": "str" # Optional. Pagination links (first, prev,
+ next, last).
+ }
+ },
+ "meta": {
+ "total": 0 # Total number of results. Required.
+ }
}
"""
error_map: MutableMapping[int, Type[HttpResponseError]] = {
@@ -120246,9 +128287,10 @@ async def get_kafka_schema(
cls: ClsType[JSON] = kwargs.pop("cls", None)
- _request = build_databases_get_kafka_schema_request(
- database_cluster_uuid=database_cluster_uuid,
- subject_name=subject_name,
+ _request = build_dedicated_inferences_list_request(
+ per_page=per_page,
+ page=page,
+ region=region,
headers=_headers,
params=_params,
)
@@ -120263,81 +128305,543 @@ async def get_kafka_schema(
response = pipeline_response.http_response
- if response.status_code not in [200, 404]:
+ if response.status_code not in [200]:
if _stream:
await response.read() # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
raise HttpResponseError(response=response)
response_headers = {}
- if response.status_code == 200:
- response_headers["ratelimit-limit"] = self._deserialize(
- "int", response.headers.get("ratelimit-limit")
- )
- response_headers["ratelimit-remaining"] = self._deserialize(
- "int", response.headers.get("ratelimit-remaining")
- )
- response_headers["ratelimit-reset"] = self._deserialize(
- "int", response.headers.get("ratelimit-reset")
- )
-
- if response.content:
- deserialized = response.json()
- else:
- deserialized = None
-
- if response.status_code == 404:
- response_headers["ratelimit-limit"] = self._deserialize(
- "int", response.headers.get("ratelimit-limit")
- )
- response_headers["ratelimit-remaining"] = self._deserialize(
- "int", response.headers.get("ratelimit-remaining")
- )
- response_headers["ratelimit-reset"] = self._deserialize(
- "int", response.headers.get("ratelimit-reset")
- )
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
- if response.content:
- deserialized = response.json()
- else:
- deserialized = None
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
if cls:
return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
return cast(JSON, deserialized) # type: ignore
+ @overload
+ async def create(
+ self, body: JSON, *, content_type: str = "application/json", **kwargs: Any
+ ) -> JSON:
+ # pylint: disable=line-too-long
+ """Create a Dedicated Inference.
+
+ Create a new Dedicated Inference for your team. Send a POST request to
+ ``/v2/dedicated-inferences`` with a ``spec`` object (version, name, region, vpc,
+ enable_public_endpoint, model_deployments) and optional ``access_tokens`` (e.g.
+ hugging_face_token for gated models). The response code 202 Accepted indicates
+ the request was accepted for processing; it does not indicate success or failure.
+ The token value is returned only on create; store it securely.
+
+ :param body: Required.
+ :type body: JSON
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: JSON object
+ :rtype: JSON
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # JSON input template you can fill out and use as your body input.
+ body = {
+ "spec": {
+ "enable_public_endpoint": bool, # Whether to expose a public LLM
+ endpoint. Required.
+ "model_deployments": [
+ {
+ "accelerators": [
+ {
+ "accelerator_slug": "str", #
+ DigitalOcean GPU slug. Required.
+ "scale": 0, # Number of accelerator
+ instances. Required.
+ "type": "str", # Accelerator type
+ (e.g. prefill_decode). Required.
+ "status": "str" # Optional. Current
+ state of the Accelerator. Known values are: "new",
+ "provisioning", and "active".
+ }
+ ],
+ "model_id": "str", # Optional. Used to identify an
+ existing deployment when updating; empty means create new.
+ "model_provider": "str", # Optional. Model provider.
+ "hugging_face"
+ "model_slug": "str", # Optional. Model identifier
+ (e.g. Hugging Face slug).
+ "workload_config": {} # Optional. Workload-specific
+ configuration (e.g. ISL/OSL in future).
+ }
+ ],
+ "name": "str", # Name of the Dedicated Inference. Must be unique
+ within the team. Required.
+ "region": "str", # DigitalOcean region where the Dedicated Inference
+ is hosted. Required. Known values are: "atl1", "nyc2", and "tor1".
+ "version": 0, # Spec version. Required.
+ "vpc": {
+ "uuid": "str" # VPC UUID for the Dedicated Inference.
+ Required.
+ }
+ },
+ "access_tokens": {
+ "str": "str" # Optional. Key-value pairs for provider tokens (e.g.
+ Hugging Face).
+ }
+ }
+
+ # response body for status code(s): 202
+ response == {
+ "dedicated_inference": {
+ "created_at": "2020-02-20 00:00:00", # Optional. When the Dedicated
+ Inference was created.
+ "endpoints": {
+ "private_endpoint_fqdn": "str", # Optional. Private VPC FQDN
+ of the Dedicated Inference instance.
+ "public_endpoint_fqdn": "str" # Optional. Public FQDN of the
+ Dedicated Inference instance.
+ },
+ "id": "str", # Optional. Unique ID of the Dedicated Inference.
+ "pending_deployment_spec": {
+ "created_at": "2020-02-20 00:00:00", # Optional. Pending
+ deployment when status is provisioning or updating.
+ "enable_public_endpoint": bool, # Optional. Whether to
+ expose a public LLM endpoint.
+ "id": "str", # Optional. Deployment UUID.
+ "model_deployments": [
+ {
+ "accelerators": [
+ {
+ "accelerator_slug": "str", #
+ DigitalOcean GPU slug. Required.
+ "scale": 0, # Number of
+ accelerator instances. Required.
+ "type": "str", # Accelerator
+ type (e.g. prefill_decode). Required.
+ "status": "str" # Optional.
+ Current state of the Accelerator. Known values are:
+ "new", "provisioning", and "active".
+ }
+ ],
+ "model_id": "str", # Optional. Used to
+ identify an existing deployment when updating; empty means create
+ new.
+ "model_provider": "str", # Optional. Model
+ provider. "hugging_face"
+ "model_slug": "str", # Optional. Model
+ identifier (e.g. Hugging Face slug).
+ "workload_config": {} # Optional.
+ Workload-specific configuration (e.g. ISL/OSL in future).
+ }
+ ],
+ "name": "str", # Optional. Name of the Dedicated Inference.
+ Must be unique within the team.
+ "status": "str", # Optional. Known values are:
+ "provisioning" and "updating".
+ "updated_at": "2020-02-20 00:00:00", # Optional. Pending
+ deployment when status is provisioning or updating.
+ "version": 0, # Optional. Spec version.
+ "vpc": {
+ "uuid": "str" # VPC UUID for the Dedicated
+ Inference. Required.
+ }
+ },
+ "region": "str", # Optional. DigitalOcean region where the Dedicated
+ Inference is hosted.
+ "spec": {
+ "enable_public_endpoint": bool, # Whether to expose a public
+ LLM endpoint. Required.
+ "model_deployments": [
+ {
+ "accelerators": [
+ {
+ "accelerator_slug": "str", #
+ DigitalOcean GPU slug. Required.
+ "scale": 0, # Number of
+ accelerator instances. Required.
+ "type": "str", # Accelerator
+ type (e.g. prefill_decode). Required.
+ "status": "str" # Optional.
+ Current state of the Accelerator. Known values are:
+ "new", "provisioning", and "active".
+ }
+ ],
+ "model_id": "str", # Optional. Used to
+ identify an existing deployment when updating; empty means create
+ new.
+ "model_provider": "str", # Optional. Model
+ provider. "hugging_face"
+ "model_slug": "str", # Optional. Model
+ identifier (e.g. Hugging Face slug).
+ "workload_config": {} # Optional.
+ Workload-specific configuration (e.g. ISL/OSL in future).
+ }
+ ],
+ "name": "str", # Name of the Dedicated Inference. Must be
+ unique within the team. Required.
+ "region": "str", # DigitalOcean region where the Dedicated
+ Inference is hosted. Required. Known values are: "atl1", "nyc2", and
+ "tor1".
+ "version": 0, # Spec version. Required.
+ "vpc": {
+ "uuid": "str" # VPC UUID for the Dedicated
+ Inference. Required.
+ }
+ },
+ "status": "str", # Optional. Current state of the Dedicated
+ Inference. Known values are: "active", "new", "provisioning", "updating",
+ "deleting", and "error".
+ "updated_at": "2020-02-20 00:00:00", # Optional. When the Dedicated
+ Inference was last updated.
+ "vpc_uuid": "str" # Optional. VPC UUID of the Dedicated Inference.
+ },
+ "token": {
+                "created_at": "2020-02-20 00:00:00",  # Optional. When the access
+                    token was created.
+ "id": "str", # Optional. Unique ID of the token.
+ "name": "str", # Optional. Name of the token.
+ "value": "str" # Optional. Token value; only returned once on
+ create. Store securely.
+ }
+ }
+ """
+
+ @overload
+ async def create(
+ self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
+ ) -> JSON:
+ # pylint: disable=line-too-long
+ """Create a Dedicated Inference.
+
+ Create a new Dedicated Inference for your team. Send a POST request to
+ ``/v2/dedicated-inferences`` with a ``spec`` object (version, name, region, vpc,
+ enable_public_endpoint, model_deployments) and optional ``access_tokens`` (e.g.
+ hugging_face_token for gated models). The response code 202 Accepted indicates
+ the request was accepted for processing; it does not indicate success or failure.
+ The token value is returned only on create; store it securely.
+
+ :param body: Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: JSON object
+ :rtype: JSON
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # response body for status code(s): 202
+ response == {
+ "dedicated_inference": {
+ "created_at": "2020-02-20 00:00:00", # Optional. When the Dedicated
+ Inference was created.
+ "endpoints": {
+ "private_endpoint_fqdn": "str", # Optional. Private VPC FQDN
+ of the Dedicated Inference instance.
+ "public_endpoint_fqdn": "str" # Optional. Public FQDN of the
+ Dedicated Inference instance.
+ },
+ "id": "str", # Optional. Unique ID of the Dedicated Inference.
+ "pending_deployment_spec": {
+ "created_at": "2020-02-20 00:00:00", # Optional. Pending
+ deployment when status is provisioning or updating.
+ "enable_public_endpoint": bool, # Optional. Whether to
+ expose a public LLM endpoint.
+ "id": "str", # Optional. Deployment UUID.
+ "model_deployments": [
+ {
+ "accelerators": [
+ {
+ "accelerator_slug": "str", #
+ DigitalOcean GPU slug. Required.
+ "scale": 0, # Number of
+ accelerator instances. Required.
+ "type": "str", # Accelerator
+ type (e.g. prefill_decode). Required.
+ "status": "str" # Optional.
+ Current state of the Accelerator. Known values are:
+ "new", "provisioning", and "active".
+ }
+ ],
+ "model_id": "str", # Optional. Used to
+ identify an existing deployment when updating; empty means create
+ new.
+ "model_provider": "str", # Optional. Model
+ provider. "hugging_face"
+ "model_slug": "str", # Optional. Model
+ identifier (e.g. Hugging Face slug).
+ "workload_config": {} # Optional.
+ Workload-specific configuration (e.g. ISL/OSL in future).
+ }
+ ],
+ "name": "str", # Optional. Name of the Dedicated Inference.
+ Must be unique within the team.
+ "status": "str", # Optional. Known values are:
+ "provisioning" and "updating".
+ "updated_at": "2020-02-20 00:00:00", # Optional. Pending
+ deployment when status is provisioning or updating.
+ "version": 0, # Optional. Spec version.
+ "vpc": {
+ "uuid": "str" # VPC UUID for the Dedicated
+ Inference. Required.
+ }
+ },
+ "region": "str", # Optional. DigitalOcean region where the Dedicated
+ Inference is hosted.
+ "spec": {
+ "enable_public_endpoint": bool, # Whether to expose a public
+ LLM endpoint. Required.
+ "model_deployments": [
+ {
+ "accelerators": [
+ {
+ "accelerator_slug": "str", #
+ DigitalOcean GPU slug. Required.
+ "scale": 0, # Number of
+ accelerator instances. Required.
+ "type": "str", # Accelerator
+ type (e.g. prefill_decode). Required.
+ "status": "str" # Optional.
+ Current state of the Accelerator. Known values are:
+ "new", "provisioning", and "active".
+ }
+ ],
+ "model_id": "str", # Optional. Used to
+ identify an existing deployment when updating; empty means create
+ new.
+ "model_provider": "str", # Optional. Model
+ provider. "hugging_face"
+ "model_slug": "str", # Optional. Model
+ identifier (e.g. Hugging Face slug).
+ "workload_config": {} # Optional.
+ Workload-specific configuration (e.g. ISL/OSL in future).
+ }
+ ],
+ "name": "str", # Name of the Dedicated Inference. Must be
+ unique within the team. Required.
+ "region": "str", # DigitalOcean region where the Dedicated
+ Inference is hosted. Required. Known values are: "atl1", "nyc2", and
+ "tor1".
+ "version": 0, # Spec version. Required.
+ "vpc": {
+ "uuid": "str" # VPC UUID for the Dedicated
+ Inference. Required.
+ }
+ },
+ "status": "str", # Optional. Current state of the Dedicated
+ Inference. Known values are: "active", "new", "provisioning", "updating",
+ "deleting", and "error".
+ "updated_at": "2020-02-20 00:00:00", # Optional. When the Dedicated
+ Inference was last updated.
+ "vpc_uuid": "str" # Optional. VPC UUID of the Dedicated Inference.
+ },
+ "token": {
+                "created_at": "2020-02-20 00:00:00",  # Optional. When the access
+                    token was created.
+ "id": "str", # Optional. Unique ID of the token.
+ "name": "str", # Optional. Name of the token.
+ "value": "str" # Optional. Token value; only returned once on
+ create. Store securely.
+ }
+ }
+ """
+
@distributed_trace_async
- async def delete_kafka_schema(
- self, database_cluster_uuid: str, subject_name: str, **kwargs: Any
- ) -> Optional[JSON]:
+ async def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON:
# pylint: disable=line-too-long
- """Delete a Kafka Schema by Subject Name.
+ """Create a Dedicated Inference.
- To delete a specific schema by subject name for a Kafka cluster, send a DELETE request to
- ``/v2/databases/$DATABASE_ID/schema-registry/$SUBJECT_NAME``.
+ Create a new Dedicated Inference for your team. Send a POST request to
+ ``/v2/dedicated-inferences`` with a ``spec`` object (version, name, region, vpc,
+ enable_public_endpoint, model_deployments) and optional ``access_tokens`` (e.g.
+ hugging_face_token for gated models). The response code 202 Accepted indicates
+ the request was accepted for processing; it does not indicate success or failure.
+ The token value is returned only on create; store it securely.
- :param database_cluster_uuid: A unique identifier for a database cluster. Required.
- :type database_cluster_uuid: str
- :param subject_name: The name of the Kafka schema subject. Required.
- :type subject_name: str
- :return: JSON object or None
- :rtype: JSON or None
+ :param body: Is either a JSON type or a IO[bytes] type. Required.
+ :type body: JSON or IO[bytes]
+ :return: JSON object
+ :rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
- # response body for status code(s): 404
+ # JSON input template you can fill out and use as your body input.
+ body = {
+ "spec": {
+ "enable_public_endpoint": bool, # Whether to expose a public LLM
+ endpoint. Required.
+ "model_deployments": [
+ {
+ "accelerators": [
+ {
+ "accelerator_slug": "str", #
+ DigitalOcean GPU slug. Required.
+ "scale": 0, # Number of accelerator
+ instances. Required.
+ "type": "str", # Accelerator type
+ (e.g. prefill_decode). Required.
+ "status": "str" # Optional. Current
+ state of the Accelerator. Known values are: "new",
+ "provisioning", and "active".
+ }
+ ],
+ "model_id": "str", # Optional. Used to identify an
+ existing deployment when updating; empty means create new.
+ "model_provider": "str", # Optional. Model provider.
+ "hugging_face"
+ "model_slug": "str", # Optional. Model identifier
+ (e.g. Hugging Face slug).
+ "workload_config": {} # Optional. Workload-specific
+ configuration (e.g. ISL/OSL in future).
+ }
+ ],
+ "name": "str", # Name of the Dedicated Inference. Must be unique
+ within the team. Required.
+ "region": "str", # DigitalOcean region where the Dedicated Inference
+ is hosted. Required. Known values are: "atl1", "nyc2", and "tor1".
+ "version": 0, # Spec version. Required.
+ "vpc": {
+ "uuid": "str" # VPC UUID for the Dedicated Inference.
+ Required.
+ }
+ },
+ "access_tokens": {
+ "str": "str" # Optional. Key-value pairs for provider tokens (e.g.
+ Hugging Face).
+ }
+ }
+
+ # response body for status code(s): 202
response == {
- "id": "str", # A short identifier corresponding to the HTTP status code
- returned. For example, the ID for a response returning a 404 status code would
- be "not_found.". Required.
- "message": "str", # A message providing additional information about the
- error, including details to help resolve it when possible. Required.
- "request_id": "str" # Optional. Optionally, some endpoints may include a
- request ID that should be provided when reporting bugs or opening support
- tickets to help identify the issue.
+ "dedicated_inference": {
+ "created_at": "2020-02-20 00:00:00", # Optional. When the Dedicated
+ Inference was created.
+ "endpoints": {
+ "private_endpoint_fqdn": "str", # Optional. Private VPC FQDN
+ of the Dedicated Inference instance.
+ "public_endpoint_fqdn": "str" # Optional. Public FQDN of the
+ Dedicated Inference instance.
+ },
+ "id": "str", # Optional. Unique ID of the Dedicated Inference.
+ "pending_deployment_spec": {
+ "created_at": "2020-02-20 00:00:00", # Optional. Pending
+ deployment when status is provisioning or updating.
+ "enable_public_endpoint": bool, # Optional. Whether to
+ expose a public LLM endpoint.
+ "id": "str", # Optional. Deployment UUID.
+ "model_deployments": [
+ {
+ "accelerators": [
+ {
+ "accelerator_slug": "str", #
+ DigitalOcean GPU slug. Required.
+ "scale": 0, # Number of
+ accelerator instances. Required.
+ "type": "str", # Accelerator
+ type (e.g. prefill_decode). Required.
+ "status": "str" # Optional.
+ Current state of the Accelerator. Known values are:
+ "new", "provisioning", and "active".
+ }
+ ],
+ "model_id": "str", # Optional. Used to
+ identify an existing deployment when updating; empty means create
+ new.
+ "model_provider": "str", # Optional. Model
+ provider. "hugging_face"
+ "model_slug": "str", # Optional. Model
+ identifier (e.g. Hugging Face slug).
+ "workload_config": {} # Optional.
+ Workload-specific configuration (e.g. ISL/OSL in future).
+ }
+ ],
+ "name": "str", # Optional. Name of the Dedicated Inference.
+ Must be unique within the team.
+ "status": "str", # Optional. Known values are:
+ "provisioning" and "updating".
+ "updated_at": "2020-02-20 00:00:00", # Optional. Pending
+ deployment when status is provisioning or updating.
+ "version": 0, # Optional. Spec version.
+ "vpc": {
+ "uuid": "str" # VPC UUID for the Dedicated
+ Inference. Required.
+ }
+ },
+ "region": "str", # Optional. DigitalOcean region where the Dedicated
+ Inference is hosted.
+ "spec": {
+ "enable_public_endpoint": bool, # Whether to expose a public
+ LLM endpoint. Required.
+ "model_deployments": [
+ {
+ "accelerators": [
+ {
+ "accelerator_slug": "str", #
+ DigitalOcean GPU slug. Required.
+ "scale": 0, # Number of
+ accelerator instances. Required.
+ "type": "str", # Accelerator
+ type (e.g. prefill_decode). Required.
+ "status": "str" # Optional.
+ Current state of the Accelerator. Known values are:
+ "new", "provisioning", and "active".
+ }
+ ],
+ "model_id": "str", # Optional. Used to
+ identify an existing deployment when updating; empty means create
+ new.
+ "model_provider": "str", # Optional. Model
+ provider. "hugging_face"
+ "model_slug": "str", # Optional. Model
+ identifier (e.g. Hugging Face slug).
+ "workload_config": {} # Optional.
+ Workload-specific configuration (e.g. ISL/OSL in future).
+ }
+ ],
+ "name": "str", # Name of the Dedicated Inference. Must be
+ unique within the team. Required.
+ "region": "str", # DigitalOcean region where the Dedicated
+ Inference is hosted. Required. Known values are: "atl1", "nyc2", and
+ "tor1".
+ "version": 0, # Spec version. Required.
+ "vpc": {
+ "uuid": "str" # VPC UUID for the Dedicated
+ Inference. Required.
+ }
+ },
+ "status": "str", # Optional. Current state of the Dedicated
+ Inference. Known values are: "active", "new", "provisioning", "updating",
+ "deleting", and "error".
+ "updated_at": "2020-02-20 00:00:00", # Optional. When the Dedicated
+ Inference was last updated.
+ "vpc_uuid": "str" # Optional. VPC UUID of the Dedicated Inference.
+ },
+ "token": {
+                "created_at": "2020-02-20 00:00:00",  # Optional. When the access
+                    token was created.
+ "id": "str", # Optional. Unique ID of the token.
+ "name": "str", # Optional. Name of the token.
+ "value": "str" # Optional. Token value; only returned once on
+ create. Store securely.
+ }
}
"""
error_map: MutableMapping[int, Type[HttpResponseError]] = {
@@ -120353,14 +128857,26 @@ async def delete_kafka_schema(
}
error_map.update(kwargs.pop("error_map", {}) or {})
- _headers = kwargs.pop("headers", {}) or {}
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = kwargs.pop("params", {}) or {}
- cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None)
+ content_type: Optional[str] = kwargs.pop(
+ "content_type", _headers.pop("Content-Type", None)
+ )
+ cls: ClsType[JSON] = kwargs.pop("cls", None)
- _request = build_databases_delete_kafka_schema_request(
- database_cluster_uuid=database_cluster_uuid,
- subject_name=subject_name,
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = body
+
+ _request = build_dedicated_inferences_create_request(
+ content_type=content_type,
+ json=_json,
+ content=_content,
headers=_headers,
params=_params,
)
@@ -120375,62 +128891,59 @@ async def delete_kafka_schema(
response = pipeline_response.http_response
- if response.status_code not in [204, 404]:
+ if response.status_code not in [202]:
if _stream:
await response.read() # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
raise HttpResponseError(response=response)
- deserialized = None
response_headers = {}
- if response.status_code == 204:
- response_headers["ratelimit-limit"] = self._deserialize(
- "int", response.headers.get("ratelimit-limit")
- )
- response_headers["ratelimit-remaining"] = self._deserialize(
- "int", response.headers.get("ratelimit-remaining")
- )
- response_headers["ratelimit-reset"] = self._deserialize(
- "int", response.headers.get("ratelimit-reset")
- )
-
- if response.status_code == 404:
- response_headers["ratelimit-limit"] = self._deserialize(
- "int", response.headers.get("ratelimit-limit")
- )
- response_headers["ratelimit-remaining"] = self._deserialize(
- "int", response.headers.get("ratelimit-remaining")
- )
- response_headers["ratelimit-reset"] = self._deserialize(
- "int", response.headers.get("ratelimit-reset")
- )
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
- if response.content:
- deserialized = response.json()
- else:
- deserialized = None
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
if cls:
- return cls(pipeline_response, deserialized, response_headers) # type: ignore
+ return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
- return deserialized # type: ignore
+ return cast(JSON, deserialized) # type: ignore
@distributed_trace_async
- async def get_kafka_schema_version(
- self, database_cluster_uuid: str, subject_name: str, version: str, **kwargs: Any
+ async def list_accelerators(
+ self,
+ dedicated_inference_id: str,
+ *,
+ per_page: int = 20,
+ page: int = 1,
+ slug: Optional[str] = None,
+ **kwargs: Any
) -> JSON:
# pylint: disable=line-too-long
- """Get Kafka Schema by Subject Version.
+ """List Dedicated Inference Accelerators.
- To get a specific schema by subject name for a Kafka cluster, send a GET request to
- ``/v2/databases/$DATABASE_ID/schema-registry/$SUBJECT_NAME/versions/$VERSION``.
+ List all accelerators (GPUs) in use by a Dedicated Inference instance. Send a
+ GET request to ``/v2/dedicated-inferences/{dedicated_inference_id}/accelerators``.
+ Optionally filter by slug and use page/per_page for pagination.
- :param database_cluster_uuid: A unique identifier for a database cluster. Required.
- :type database_cluster_uuid: str
- :param subject_name: The name of the Kafka schema subject. Required.
- :type subject_name: str
- :param version: The version of the Kafka schema subject. Required.
- :type version: str
+ :param dedicated_inference_id: A unique identifier for a Dedicated Inference instance.
+ Required.
+ :type dedicated_inference_id: str
+ :keyword per_page: Number of items returned per page. Default value is 20.
+ :paramtype per_page: int
+ :keyword page: Which 'page' of paginated results to return. Default value is 1.
+ :paramtype page: int
+ :keyword slug: Filter accelerators by GPU slug. Default value is None.
+ :paramtype slug: str
:return: JSON object
:rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
@@ -120440,12 +128953,23 @@ async def get_kafka_schema_version(
# response body for status code(s): 200
response == {
- "schema": "str", # Optional. The schema definition in the specified format.
- "schema_id": 0, # Optional. The id for schema.
- "schema_type": "str", # Optional. The type of the schema. Known values are:
- "AVRO", "JSON", and "PROTOBUF".
- "subject_name": "str", # Optional. The name of the schema subject.
- "version": "str" # Optional. The version of the schema.
+ "meta": {
+ "total": 0 # Optional. Number of objects returned by the request.
+ },
+ "accelerators": [
+ {
+ "created_at": "2020-02-20 00:00:00", # Optional.
+ "id": "str", # Optional. Unique ID of the accelerator.
+ "name": "str", # Optional. Name of the accelerator.
+ "role": "str", # Optional. Role of the accelerator (e.g.
+ prefill_decode).
+ "slug": "str", # Optional. DigitalOcean GPU slug.
+ "status": "str" # Optional. Status of the accelerator.
+ }
+ ],
+ "links": {
+ "pages": {}
+ }
}
# response body for status code(s): 404
response == {
@@ -120477,10 +129001,11 @@ async def get_kafka_schema_version(
cls: ClsType[JSON] = kwargs.pop("cls", None)
- _request = build_databases_get_kafka_schema_version_request(
- database_cluster_uuid=database_cluster_uuid,
- subject_name=subject_name,
- version=version,
+ _request = build_dedicated_inferences_list_accelerators_request(
+ dedicated_inference_id=dedicated_inference_id,
+ per_page=per_page,
+ page=page,
+ slug=slug,
headers=_headers,
params=_params,
)
@@ -120540,19 +129065,21 @@ async def get_kafka_schema_version(
return cast(JSON, deserialized) # type: ignore
@distributed_trace_async
- async def get_kafka_schema_config(
- self, database_cluster_uuid: str, **kwargs: Any
+ async def get_accelerator(
+ self, dedicated_inference_id: str, accelerator_id: str, **kwargs: Any
) -> JSON:
# pylint: disable=line-too-long
- """Retrieve Schema Registry Configuration for a kafka Cluster.
+ """Get a Dedicated Inference Accelerator.
- To retrieve the Schema Registry configuration for a Kafka cluster, send a GET request to
- ``/v2/databases/$DATABASE_ID/schema-registry/config``.
- The response is a JSON object with a ``compatibility_level`` key, which is set to an object
- containing any database configuration parameters.
+ Retrieve a single accelerator by ID for a Dedicated Inference instance. Send a
+ GET request to
+ ``/v2/dedicated-inferences/{dedicated_inference_id}/accelerators/{accelerator_id}``.
- :param database_cluster_uuid: A unique identifier for a database cluster. Required.
- :type database_cluster_uuid: str
+ :param dedicated_inference_id: A unique identifier for a Dedicated Inference instance.
+ Required.
+ :type dedicated_inference_id: str
+ :param accelerator_id: A unique identifier for a Dedicated Inference accelerator. Required.
+ :type accelerator_id: str
:return: JSON object
:rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
@@ -120562,9 +129089,12 @@ async def get_kafka_schema_config(
# response body for status code(s): 200
response == {
- "compatibility_level": "str" # The compatibility level of the schema
- registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE",
- "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE".
+ "created_at": "2020-02-20 00:00:00", # Optional.
+ "id": "str", # Optional. Unique ID of the accelerator.
+ "name": "str", # Optional. Name of the accelerator.
+ "role": "str", # Optional. Role of the accelerator (e.g. prefill_decode).
+ "slug": "str", # Optional. DigitalOcean GPU slug.
+ "status": "str" # Optional. Status of the accelerator.
}
# response body for status code(s): 404
response == {
@@ -120596,8 +129126,9 @@ async def get_kafka_schema_config(
cls: ClsType[JSON] = kwargs.pop("cls", None)
- _request = build_databases_get_kafka_schema_config_request(
- database_cluster_uuid=database_cluster_uuid,
+ _request = build_dedicated_inferences_get_accelerator_request(
+ dedicated_inference_id=dedicated_inference_id,
+ accelerator_id=accelerator_id,
headers=_headers,
params=_params,
)
@@ -120656,132 +129187,18 @@ async def get_kafka_schema_config(
return cast(JSON, deserialized) # type: ignore
- @overload
- async def update_kafka_schema_config(
- self,
- database_cluster_uuid: str,
- body: Optional[JSON] = None,
- *,
- content_type: str = "application/json",
- **kwargs: Any
- ) -> JSON:
- # pylint: disable=line-too-long
- """Update Schema Registry Configuration for a kafka Cluster.
-
- To update the Schema Registry configuration for a Kafka cluster, send a PUT request to
- ``/v2/databases/$DATABASE_ID/schema-registry/config``.
- The response is a JSON object with a ``compatibility_level`` key, which is set to an object
- containing any database configuration parameters.
-
- :param database_cluster_uuid: A unique identifier for a database cluster. Required.
- :type database_cluster_uuid: str
- :param body: Default value is None.
- :type body: JSON
- :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
- Default value is "application/json".
- :paramtype content_type: str
- :return: JSON object
- :rtype: JSON
- :raises ~azure.core.exceptions.HttpResponseError:
-
- Example:
- .. code-block:: python
-
- # JSON input template you can fill out and use as your body input.
- body = {
- "compatibility_level": "str" # The compatibility level of the schema
- registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE",
- "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE".
- }
-
- # response body for status code(s): 200
- response == {
- "compatibility_level": "str" # The compatibility level of the schema
- registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE",
- "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE".
- }
- # response body for status code(s): 404
- response == {
- "id": "str", # A short identifier corresponding to the HTTP status code
- returned. For example, the ID for a response returning a 404 status code would
- be "not_found.". Required.
- "message": "str", # A message providing additional information about the
- error, including details to help resolve it when possible. Required.
- "request_id": "str" # Optional. Optionally, some endpoints may include a
- request ID that should be provided when reporting bugs or opening support
- tickets to help identify the issue.
- }
- """
-
- @overload
- async def update_kafka_schema_config(
- self,
- database_cluster_uuid: str,
- body: Optional[IO[bytes]] = None,
- *,
- content_type: str = "application/json",
- **kwargs: Any
- ) -> JSON:
- # pylint: disable=line-too-long
- """Update Schema Registry Configuration for a kafka Cluster.
-
- To update the Schema Registry configuration for a Kafka cluster, send a PUT request to
- ``/v2/databases/$DATABASE_ID/schema-registry/config``.
- The response is a JSON object with a ``compatibility_level`` key, which is set to an object
- containing any database configuration parameters.
-
- :param database_cluster_uuid: A unique identifier for a database cluster. Required.
- :type database_cluster_uuid: str
- :param body: Default value is None.
- :type body: IO[bytes]
- :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
- Default value is "application/json".
- :paramtype content_type: str
- :return: JSON object
- :rtype: JSON
- :raises ~azure.core.exceptions.HttpResponseError:
-
- Example:
- .. code-block:: python
-
- # response body for status code(s): 200
- response == {
- "compatibility_level": "str" # The compatibility level of the schema
- registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE",
- "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE".
- }
- # response body for status code(s): 404
- response == {
- "id": "str", # A short identifier corresponding to the HTTP status code
- returned. For example, the ID for a response returning a 404 status code would
- be "not_found.". Required.
- "message": "str", # A message providing additional information about the
- error, including details to help resolve it when possible. Required.
- "request_id": "str" # Optional. Optionally, some endpoints may include a
- request ID that should be provided when reporting bugs or opening support
- tickets to help identify the issue.
- }
- """
-
@distributed_trace_async
- async def update_kafka_schema_config(
- self,
- database_cluster_uuid: str,
- body: Optional[Union[JSON, IO[bytes]]] = None,
- **kwargs: Any
- ) -> JSON:
+ async def get_ca(self, dedicated_inference_id: str, **kwargs: Any) -> JSON:
# pylint: disable=line-too-long
- """Update Schema Registry Configuration for a kafka Cluster.
+ """Get Dedicated Inference CA Certificate.
- To update the Schema Registry configuration for a Kafka cluster, send a PUT request to
- ``/v2/databases/$DATABASE_ID/schema-registry/config``.
- The response is a JSON object with a ``compatibility_level`` key, which is set to an object
- containing any database configuration parameters.
+ Get the CA certificate for a Dedicated Inference instance (base64-encoded).
+ Required for private endpoint connectivity. Send a GET request to
+ ``/v2/dedicated-inferences/{dedicated_inference_id}/ca``.
- :param database_cluster_uuid: A unique identifier for a database cluster. Required.
- :type database_cluster_uuid: str
- :param body: Is either a JSON type or a IO[bytes] type. Default value is None.
- :type body: JSON or IO[bytes]
+ :param dedicated_inference_id: A unique identifier for a Dedicated Inference instance.
+ Required.
+ :type dedicated_inference_id: str
:return: JSON object
:rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
@@ -120789,18 +129206,9 @@ async def update_kafka_schema_config(
Example:
.. code-block:: python
- # JSON input template you can fill out and use as your body input.
- body = {
- "compatibility_level": "str" # The compatibility level of the schema
- registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE",
- "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE".
- }
-
# response body for status code(s): 200
response == {
- "compatibility_level": "str" # The compatibility level of the schema
- registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE",
- "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE".
+ "cert": "str" # Base64-encoded CA certificate. Required.
}
# response body for status code(s): 404
response == {
@@ -120827,30 +129235,13 @@ async def update_kafka_schema_config(
}
error_map.update(kwargs.pop("error_map", {}) or {})
- _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _headers = kwargs.pop("headers", {}) or {}
_params = kwargs.pop("params", {}) or {}
- content_type: Optional[str] = kwargs.pop(
- "content_type", _headers.pop("Content-Type", None)
- )
cls: ClsType[JSON] = kwargs.pop("cls", None)
- content_type = content_type or "application/json"
- _json = None
- _content = None
- if isinstance(body, (IOBase, bytes)):
- _content = body
- else:
- if body is not None:
- _json = body
- else:
- _json = None
-
- _request = build_databases_update_kafka_schema_config_request(
- database_cluster_uuid=database_cluster_uuid,
- content_type=content_type,
- json=_json,
- content=_content,
+ _request = build_dedicated_inferences_get_ca_request(
+ dedicated_inference_id=dedicated_inference_id,
headers=_headers,
params=_params,
)
@@ -120910,22 +129301,28 @@ async def update_kafka_schema_config(
return cast(JSON, deserialized) # type: ignore
@distributed_trace_async
- async def get_kafka_schema_subject_config(
- self, database_cluster_uuid: str, subject_name: str, **kwargs: Any
+ async def list_tokens(
+ self,
+ dedicated_inference_id: str,
+ *,
+ per_page: int = 20,
+ page: int = 1,
+ **kwargs: Any
) -> JSON:
# pylint: disable=line-too-long
- """Retrieve Schema Registry Configuration for a Subject of kafka Cluster.
+ """List Dedicated Inference Tokens.
- To retrieve the Schema Registry configuration for a Subject of a Kafka cluster, send a GET
- request to
- ``/v2/databases/$DATABASE_ID/schema-registry/config/$SUBJECT_NAME``.
- The response is a JSON object with a ``compatibility_level`` key, which is set to an object
- containing any database configuration parameters.
+ List all access tokens for a Dedicated Inference instance. Token values are
+ not returned; only id, name, and created_at. Send a GET request to
+ ``/v2/dedicated-inferences/{dedicated_inference_id}/tokens``.
- :param database_cluster_uuid: A unique identifier for a database cluster. Required.
- :type database_cluster_uuid: str
- :param subject_name: The name of the Kafka schema subject. Required.
- :type subject_name: str
+ :param dedicated_inference_id: A unique identifier for a Dedicated Inference instance.
+ Required.
+ :type dedicated_inference_id: str
+ :keyword per_page: Number of items returned per page. Default value is 20.
+ :paramtype per_page: int
+ :keyword page: Which 'page' of paginated results to return. Default value is 1.
+ :paramtype page: int
:return: JSON object
:rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
@@ -120935,10 +129332,21 @@ async def get_kafka_schema_subject_config(
# response body for status code(s): 200
response == {
- "compatibility_level": "str", # The compatibility level of the schema
- registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE",
- "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE".
- "subject_name": "str" # The name of the schema subject. Required.
+ "meta": {
+ "total": 0 # Optional. Number of objects returned by the request.
+ },
+ "links": {
+ "pages": {}
+ },
+ "tokens": [
+ {
+ "created_at": "2020-02-20 00:00:00", # Optional.
+ "id": "str", # Optional. Unique ID of the token.
+ "name": "str", # Optional. Name of the token.
+ "value": "str" # Optional. Token value; only returned once
+ on create. Store securely.
+ }
+ ]
}
# response body for status code(s): 404
response == {
@@ -120970,9 +129378,10 @@ async def get_kafka_schema_subject_config(
cls: ClsType[JSON] = kwargs.pop("cls", None)
- _request = build_databases_get_kafka_schema_subject_config_request(
- database_cluster_uuid=database_cluster_uuid,
- subject_name=subject_name,
+ _request = build_dedicated_inferences_list_tokens_request(
+ dedicated_inference_id=dedicated_inference_id,
+ per_page=per_page,
+ page=page,
headers=_headers,
params=_params,
)
@@ -121032,29 +129441,25 @@ async def get_kafka_schema_subject_config(
return cast(JSON, deserialized) # type: ignore
@overload
- async def update_kafka_schema_subject_config(
+ async def create_tokens(
self,
- database_cluster_uuid: str,
- subject_name: str,
- body: Optional[JSON] = None,
+ dedicated_inference_id: str,
+ body: JSON,
*,
content_type: str = "application/json",
**kwargs: Any
) -> JSON:
# pylint: disable=line-too-long
- """Update Schema Registry Configuration for a Subject of kafka Cluster.
+ """Create a Dedicated Inference Token.
- To update the Schema Registry configuration for a Subject of a Kafka cluster, send a PUT
- request to
- ``/v2/databases/$DATABASE_ID/schema-registry/config/$SUBJECT_NAME``.
- The response is a JSON object with a ``compatibility_level`` key, which is set to an object
- containing any database configuration parameters.
+ Create a new access token for a Dedicated Inference instance. Send a POST
+ request to ``/v2/dedicated-inferences/{dedicated_inference_id}/tokens`` with a
+ ``name``. The token value is returned only once in the response; store it securely.
- :param database_cluster_uuid: A unique identifier for a database cluster. Required.
- :type database_cluster_uuid: str
- :param subject_name: The name of the Kafka schema subject. Required.
- :type subject_name: str
- :param body: Default value is None.
+ :param dedicated_inference_id: A unique identifier for a Dedicated Inference instance.
+ Required.
+ :type dedicated_inference_id: str
+ :param body: Required.
:type body: JSON
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
@@ -121068,17 +129473,19 @@ async def update_kafka_schema_subject_config(
# JSON input template you can fill out and use as your body input.
body = {
- "compatibility_level": "str" # The compatibility level of the schema
- registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE",
- "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE".
+ "name": "str" # Name for the new token. Required.
}
- # response body for status code(s): 200
+ # response body for status code(s): 202
response == {
- "compatibility_level": "str", # The compatibility level of the schema
- registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE",
- "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE".
- "subject_name": "str" # The name of the schema subject. Required.
+ "token": {
+                    "created_at": "2020-02-20 00:00:00",  # Optional. Time at
+                        which the token was created.
+ "id": "str", # Optional. Unique ID of the token.
+ "name": "str", # Optional. Name of the token.
+ "value": "str" # Optional. Token value; only returned once on
+ create. Store securely.
+ }
}
# response body for status code(s): 404
response == {
@@ -121094,29 +129501,25 @@ async def update_kafka_schema_subject_config(
"""
@overload
- async def update_kafka_schema_subject_config(
+ async def create_tokens(
self,
- database_cluster_uuid: str,
- subject_name: str,
- body: Optional[IO[bytes]] = None,
+ dedicated_inference_id: str,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
) -> JSON:
# pylint: disable=line-too-long
- """Update Schema Registry Configuration for a Subject of kafka Cluster.
+ """Create a Dedicated Inference Token.
- To update the Schema Registry configuration for a Subject of a Kafka cluster, send a PUT
- request to
- ``/v2/databases/$DATABASE_ID/schema-registry/config/$SUBJECT_NAME``.
- The response is a JSON object with a ``compatibility_level`` key, which is set to an object
- containing any database configuration parameters.
+ Create a new access token for a Dedicated Inference instance. Send a POST
+ request to ``/v2/dedicated-inferences/{dedicated_inference_id}/tokens`` with a
+ ``name``. The token value is returned only once in the response; store it securely.
- :param database_cluster_uuid: A unique identifier for a database cluster. Required.
- :type database_cluster_uuid: str
- :param subject_name: The name of the Kafka schema subject. Required.
- :type subject_name: str
- :param body: Default value is None.
+ :param dedicated_inference_id: A unique identifier for a Dedicated Inference instance.
+ Required.
+ :type dedicated_inference_id: str
+ :param body: Required.
:type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
@@ -121128,12 +129531,16 @@ async def update_kafka_schema_subject_config(
Example:
.. code-block:: python
- # response body for status code(s): 200
+ # response body for status code(s): 202
response == {
- "compatibility_level": "str", # The compatibility level of the schema
- registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE",
- "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE".
- "subject_name": "str" # The name of the schema subject. Required.
+ "token": {
+                    "created_at": "2020-02-20 00:00:00",  # Optional. Time at
+                        which the token was created.
+ "id": "str", # Optional. Unique ID of the token.
+ "name": "str", # Optional. Name of the token.
+ "value": "str" # Optional. Token value; only returned once on
+ create. Store securely.
+ }
}
# response body for status code(s): 404
response == {
@@ -121149,27 +129556,20 @@ async def update_kafka_schema_subject_config(
"""
@distributed_trace_async
- async def update_kafka_schema_subject_config(
- self,
- database_cluster_uuid: str,
- subject_name: str,
- body: Optional[Union[JSON, IO[bytes]]] = None,
- **kwargs: Any
+ async def create_tokens(
+ self, dedicated_inference_id: str, body: Union[JSON, IO[bytes]], **kwargs: Any
) -> JSON:
# pylint: disable=line-too-long
- """Update Schema Registry Configuration for a Subject of kafka Cluster.
+ """Create a Dedicated Inference Token.
- To update the Schema Registry configuration for a Subject of a Kafka cluster, send a PUT
- request to
- ``/v2/databases/$DATABASE_ID/schema-registry/config/$SUBJECT_NAME``.
- The response is a JSON object with a ``compatibility_level`` key, which is set to an object
- containing any database configuration parameters.
+ Create a new access token for a Dedicated Inference instance. Send a POST
+ request to ``/v2/dedicated-inferences/{dedicated_inference_id}/tokens`` with a
+ ``name``. The token value is returned only once in the response; store it securely.
- :param database_cluster_uuid: A unique identifier for a database cluster. Required.
- :type database_cluster_uuid: str
- :param subject_name: The name of the Kafka schema subject. Required.
- :type subject_name: str
- :param body: Is either a JSON type or a IO[bytes] type. Default value is None.
+ :param dedicated_inference_id: A unique identifier for a Dedicated Inference instance.
+ Required.
+ :type dedicated_inference_id: str
+ :param body: Is either a JSON type or a IO[bytes] type. Required.
:type body: JSON or IO[bytes]
:return: JSON object
:rtype: JSON
@@ -121180,17 +129580,19 @@ async def update_kafka_schema_subject_config(
# JSON input template you can fill out and use as your body input.
body = {
- "compatibility_level": "str" # The compatibility level of the schema
- registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE",
- "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE".
+ "name": "str" # Name for the new token. Required.
}
- # response body for status code(s): 200
+ # response body for status code(s): 202
response == {
- "compatibility_level": "str", # The compatibility level of the schema
- registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE",
- "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE".
- "subject_name": "str" # The name of the schema subject. Required.
+ "token": {
+                    "created_at": "2020-02-20 00:00:00",  # Optional. Time at
+                        which the token was created.
+ "id": "str", # Optional. Unique ID of the token.
+ "name": "str", # Optional. Name of the token.
+ "value": "str" # Optional. Token value; only returned once on
+ create. Store securely.
+ }
}
# response body for status code(s): 404
response == {
@@ -121231,14 +129633,10 @@ async def update_kafka_schema_subject_config(
if isinstance(body, (IOBase, bytes)):
_content = body
else:
- if body is not None:
- _json = body
- else:
- _json = None
+ _json = body
- _request = build_databases_update_kafka_schema_subject_config_request(
- database_cluster_uuid=database_cluster_uuid,
- subject_name=subject_name,
+ _request = build_dedicated_inferences_create_tokens_request(
+ dedicated_inference_id=dedicated_inference_id,
content_type=content_type,
json=_json,
content=_content,
@@ -121256,14 +129654,14 @@ async def update_kafka_schema_subject_config(
response = pipeline_response.http_response
- if response.status_code not in [200, 404]:
+ if response.status_code not in [202, 404]:
if _stream:
await response.read() # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
raise HttpResponseError(response=response)
response_headers = {}
- if response.status_code == 200:
+ if response.status_code == 202:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
)
@@ -121301,32 +129699,27 @@ async def update_kafka_schema_subject_config(
return cast(JSON, deserialized) # type: ignore
@distributed_trace_async
- async def get_cluster_metrics_credentials(self, **kwargs: Any) -> JSON:
+ async def delete_tokens(
+ self, dedicated_inference_id: str, token_id: str, **kwargs: Any
+ ) -> Optional[JSON]:
# pylint: disable=line-too-long
- """Retrieve Database Clusters' Metrics Endpoint Credentials.
+ """Revoke a Dedicated Inference Token.
- To show the credentials for all database clusters' metrics endpoints, send a GET request to
- ``/v2/databases/metrics/credentials``. The result will be a JSON object with a ``credentials``
- key.
+ Revoke (delete) an access token for a Dedicated Inference instance. Send a
+ DELETE request to ``/v2/dedicated-inferences/{dedicated_inference_id}/tokens/{token_id}``.
- :return: JSON object
- :rtype: JSON
+ :param dedicated_inference_id: A unique identifier for a Dedicated Inference instance.
+ Required.
+ :type dedicated_inference_id: str
+ :param token_id: A unique identifier for a Dedicated Inference access token. Required.
+ :type token_id: str
+ :return: JSON object or None
+ :rtype: JSON or None
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
- # response body for status code(s): 200
- response == {
- "credentials": {
- "credentials": {
- "basic_auth_password": "str", # Optional. basic
- authentication password for metrics HTTP endpoint.
- "basic_auth_username": "str" # Optional. basic
- authentication username for metrics HTTP endpoint.
- }
- }
- }
# response body for status code(s): 404
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
@@ -121355,9 +129748,11 @@ async def get_cluster_metrics_credentials(self, **kwargs: Any) -> JSON:
_headers = kwargs.pop("headers", {}) or {}
_params = kwargs.pop("params", {}) or {}
- cls: ClsType[JSON] = kwargs.pop("cls", None)
+ cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None)
- _request = build_databases_get_cluster_metrics_credentials_request(
+ _request = build_dedicated_inferences_delete_tokens_request(
+ dedicated_inference_id=dedicated_inference_id,
+ token_id=token_id,
headers=_headers,
params=_params,
)
@@ -121372,14 +129767,15 @@ async def get_cluster_metrics_credentials(self, **kwargs: Any) -> JSON:
response = pipeline_response.http_response
- if response.status_code not in [200, 404]:
+ if response.status_code not in [204, 404]:
if _stream:
await response.read() # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
raise HttpResponseError(response=response)
+ deserialized = None
response_headers = {}
- if response.status_code == 200:
+ if response.status_code == 204:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
)
@@ -121390,11 +129786,6 @@ async def get_cluster_metrics_credentials(self, **kwargs: Any) -> JSON:
"int", response.headers.get("ratelimit-reset")
)
- if response.content:
- deserialized = response.json()
- else:
- deserialized = None
-
if response.status_code == 404:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
@@ -121412,98 +129803,37 @@ async def get_cluster_metrics_credentials(self, **kwargs: Any) -> JSON:
deserialized = None
if cls:
- return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
-
- return cast(JSON, deserialized) # type: ignore
-
- @overload
- async def update_cluster_metrics_credentials( # pylint: disable=inconsistent-return-statements
- self,
- body: Optional[JSON] = None,
- *,
- content_type: str = "application/json",
- **kwargs: Any
- ) -> None:
- """Update Database Clusters' Metrics Endpoint Credentials.
-
- To update the credentials for all database clusters' metrics endpoints, send a PUT request to
- ``/v2/databases/metrics/credentials``. A successful request will receive a 204 No Content
- status code with no body in response.
-
- :param body: Default value is None.
- :type body: JSON
- :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
- Default value is "application/json".
- :paramtype content_type: str
- :return: None
- :rtype: None
- :raises ~azure.core.exceptions.HttpResponseError:
-
- Example:
- .. code-block:: python
-
- # JSON input template you can fill out and use as your body input.
- body = {
- "credentials": {
- "basic_auth_password": "str", # Optional. basic authentication
- password for metrics HTTP endpoint.
- "basic_auth_username": "str" # Optional. basic authentication
- username for metrics HTTP endpoint.
- }
- }
- """
-
- @overload
- async def update_cluster_metrics_credentials( # pylint: disable=inconsistent-return-statements
- self,
- body: Optional[IO[bytes]] = None,
- *,
- content_type: str = "application/json",
- **kwargs: Any
- ) -> None:
- """Update Database Clusters' Metrics Endpoint Credentials.
-
- To update the credentials for all database clusters' metrics endpoints, send a PUT request to
- ``/v2/databases/metrics/credentials``. A successful request will receive a 204 No Content
- status code with no body in response.
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- :param body: Default value is None.
- :type body: IO[bytes]
- :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
- Default value is "application/json".
- :paramtype content_type: str
- :return: None
- :rtype: None
- :raises ~azure.core.exceptions.HttpResponseError:
- """
+ return deserialized # type: ignore
@distributed_trace_async
- async def update_cluster_metrics_credentials( # pylint: disable=inconsistent-return-statements
- self, body: Optional[Union[JSON, IO[bytes]]] = None, **kwargs: Any
- ) -> None:
- """Update Database Clusters' Metrics Endpoint Credentials.
+ async def list_sizes(self, **kwargs: Any) -> JSON:
+ """List Dedicated Inference Sizes.
- To update the credentials for all database clusters' metrics endpoints, send a PUT request to
- ``/v2/databases/metrics/credentials``. A successful request will receive a 204 No Content
- status code with no body in response.
+ Get available Dedicated Inference sizes and pricing for supported GPUs. Send a
+ GET request to ``/v2/dedicated-inferences/sizes``.
- :param body: Is either a JSON type or a IO[bytes] type. Default value is None.
- :type body: JSON or IO[bytes]
- :return: None
- :rtype: None
+ :return: JSON object
+ :rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
- # JSON input template you can fill out and use as your body input.
- body = {
- "credentials": {
- "basic_auth_password": "str", # Optional. basic authentication
- password for metrics HTTP endpoint.
- "basic_auth_username": "str" # Optional. basic authentication
- username for metrics HTTP endpoint.
- }
+ # response body for status code(s): 200
+ response == {
+ "enabled_regions": [
+ "str" # Optional. Regions where Dedicated Inference is available.
+ ],
+ "sizes": [
+ {
+ "currency": "str", # Optional.
+ "gpu_slug": "str", # Optional.
+ "price_per_hour": "str", # Optional.
+ "region": "str" # Optional.
+ }
+ ]
}
"""
error_map: MutableMapping[int, Type[HttpResponseError]] = {
@@ -121519,29 +129849,12 @@ async def update_cluster_metrics_credentials( # pylint: disable=inconsistent-re
}
error_map.update(kwargs.pop("error_map", {}) or {})
- _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _headers = kwargs.pop("headers", {}) or {}
_params = kwargs.pop("params", {}) or {}
- content_type: Optional[str] = kwargs.pop(
- "content_type", _headers.pop("Content-Type", None)
- )
- cls: ClsType[None] = kwargs.pop("cls", None)
-
- content_type = content_type or "application/json"
- _json = None
- _content = None
- if isinstance(body, (IOBase, bytes)):
- _content = body
- else:
- if body is not None:
- _json = body
- else:
- _json = None
+ cls: ClsType[JSON] = kwargs.pop("cls", None)
- _request = build_databases_update_cluster_metrics_credentials_request(
- content_type=content_type,
- json=_json,
- content=_content,
+ _request = build_dedicated_inferences_list_sizes_request(
headers=_headers,
params=_params,
)
@@ -121556,7 +129869,7 @@ async def update_cluster_metrics_credentials( # pylint: disable=inconsistent-re
response = pipeline_response.http_response
- if response.status_code not in [204]:
+ if response.status_code not in [200]:
if _stream:
await response.read() # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
@@ -121573,23 +129886,24 @@ async def update_cluster_metrics_credentials( # pylint: disable=inconsistent-re
"int", response.headers.get("ratelimit-reset")
)
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
if cls:
- return cls(pipeline_response, None, response_headers) # type: ignore
+ return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
- @distributed_trace_async
- async def list_opeasearch_indexes(
- self, database_cluster_uuid: str, **kwargs: Any
- ) -> JSON:
- # pylint: disable=line-too-long
- """List Indexes for a OpenSearch Cluster.
+ return cast(JSON, deserialized) # type: ignore
- To list all of a OpenSearch cluster's indexes, send a GET request to
- ``/v2/databases/$DATABASE_ID/indexes``.
+ @distributed_trace_async
+ async def get_gpu_model_config(self, **kwargs: Any) -> JSON:
+ """Get Dedicated Inference GPU Model Config.
- The result will be a JSON object with a ``indexes`` key.
+ Get supported GPU and model configurations for Dedicated Inference. Use this to
+ discover supported GPU slugs and model slugs (e.g. Hugging Face). Send a GET
+ request to ``/v2/dedicated-inferences/gpu-model-config``.
- :param database_cluster_uuid: A unique identifier for a database cluster. Required.
- :type database_cluster_uuid: str
:return: JSON object
:rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
@@ -121599,35 +129913,18 @@ async def list_opeasearch_indexes(
# response body for status code(s): 200
response == {
- "indexes": [
+ "gpu_model_configs": [
{
- "created_time": "2020-02-20 00:00:00", # Optional. The date
- and time the index was created.
- "health": "str", # Optional. The health of the OpenSearch
- index. Known values are: "unknown", "green", "yellow", "red", and "red*".
- "index_name": "str", # Optional. The name of the opensearch
- index.
- "number_of_replicas": 0, # Optional. The number of replicas
- for the index.
- "number_of_shards": 0, # Optional. The number of shards for
- the index.
- "size": 0, # Optional. The size of the index.
- "status": "str" # Optional. The status of the OpenSearch
- index. Known values are: "unknown", "open", "close", and "none".
+ "gpu_slugs": [
+ "str" # Optional.
+ ],
+ "is_gated_model": bool, # Optional. Whether the model
+ requires gated access (e.g. Hugging Face token).
+ "model_name": "str", # Optional.
+ "model_slug": "str" # Optional.
}
]
}
- # response body for status code(s): 404
- response == {
- "id": "str", # A short identifier corresponding to the HTTP status code
- returned. For example, the ID for a response returning a 404 status code would
- be "not_found.". Required.
- "message": "str", # A message providing additional information about the
- error, including details to help resolve it when possible. Required.
- "request_id": "str" # Optional. Optionally, some endpoints may include a
- request ID that should be provided when reporting bugs or opening support
- tickets to help identify the issue.
- }
"""
error_map: MutableMapping[int, Type[HttpResponseError]] = {
404: ResourceNotFoundError,
@@ -121647,8 +129944,7 @@ async def list_opeasearch_indexes(
cls: ClsType[JSON] = kwargs.pop("cls", None)
- _request = build_databases_list_opeasearch_indexes_request(
- database_cluster_uuid=database_cluster_uuid,
+ _request = build_dedicated_inferences_get_gpu_model_config_request(
headers=_headers,
params=_params,
)
@@ -121663,160 +129959,32 @@ async def list_opeasearch_indexes(
response = pipeline_response.http_response
- if response.status_code not in [200, 404]:
+ if response.status_code not in [200]:
if _stream:
await response.read() # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
raise HttpResponseError(response=response)
response_headers = {}
- if response.status_code == 200:
- response_headers["ratelimit-limit"] = self._deserialize(
- "int", response.headers.get("ratelimit-limit")
- )
- response_headers["ratelimit-remaining"] = self._deserialize(
- "int", response.headers.get("ratelimit-remaining")
- )
- response_headers["ratelimit-reset"] = self._deserialize(
- "int", response.headers.get("ratelimit-reset")
- )
-
- if response.content:
- deserialized = response.json()
- else:
- deserialized = None
-
- if response.status_code == 404:
- response_headers["ratelimit-limit"] = self._deserialize(
- "int", response.headers.get("ratelimit-limit")
- )
- response_headers["ratelimit-remaining"] = self._deserialize(
- "int", response.headers.get("ratelimit-remaining")
- )
- response_headers["ratelimit-reset"] = self._deserialize(
- "int", response.headers.get("ratelimit-reset")
- )
-
- if response.content:
- deserialized = response.json()
- else:
- deserialized = None
-
- if cls:
- return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
-
- return cast(JSON, deserialized) # type: ignore
-
- @distributed_trace_async
- async def delete_opensearch_index(
- self, database_cluster_uuid: str, index_name: str, **kwargs: Any
- ) -> Optional[JSON]:
- # pylint: disable=line-too-long
- """Delete Index for OpenSearch Cluster.
-
- To delete a single index within OpenSearch cluster, send a DELETE request
- to ``/v2/databases/$DATABASE_ID/indexes/$INDEX_NAME``.
-
- A status of 204 will be given. This indicates that the request was
- processed successfully, but that no response body is needed.
-
- :param database_cluster_uuid: A unique identifier for a database cluster. Required.
- :type database_cluster_uuid: str
- :param index_name: The name of the OpenSearch index. Required.
- :type index_name: str
- :return: JSON object or None
- :rtype: JSON or None
- :raises ~azure.core.exceptions.HttpResponseError:
-
- Example:
- .. code-block:: python
-
- # response body for status code(s): 404
- response == {
- "id": "str", # A short identifier corresponding to the HTTP status code
- returned. For example, the ID for a response returning a 404 status code would
- be "not_found.". Required.
- "message": "str", # A message providing additional information about the
- error, including details to help resolve it when possible. Required.
- "request_id": "str" # Optional. Optionally, some endpoints may include a
- request ID that should be provided when reporting bugs or opening support
- tickets to help identify the issue.
- }
- """
- error_map: MutableMapping[int, Type[HttpResponseError]] = {
- 404: ResourceNotFoundError,
- 409: ResourceExistsError,
- 304: ResourceNotModifiedError,
- 401: cast(
- Type[HttpResponseError],
- lambda response: ClientAuthenticationError(response=response),
- ),
- 429: HttpResponseError,
- 500: HttpResponseError,
- }
- error_map.update(kwargs.pop("error_map", {}) or {})
-
- _headers = kwargs.pop("headers", {}) or {}
- _params = kwargs.pop("params", {}) or {}
-
- cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None)
-
- _request = build_databases_delete_opensearch_index_request(
- database_cluster_uuid=database_cluster_uuid,
- index_name=index_name,
- headers=_headers,
- params=_params,
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
)
- _request.url = self._client.format_url(_request.url)
-
- _stream = False
- pipeline_response: PipelineResponse = (
- await self._client._pipeline.run( # pylint: disable=protected-access
- _request, stream=_stream, **kwargs
- )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
)
- response = pipeline_response.http_response
-
- if response.status_code not in [204, 404]:
- if _stream:
- await response.read() # Load the body in memory and close the socket
- map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
- raise HttpResponseError(response=response)
-
- deserialized = None
- response_headers = {}
- if response.status_code == 204:
- response_headers["ratelimit-limit"] = self._deserialize(
- "int", response.headers.get("ratelimit-limit")
- )
- response_headers["ratelimit-remaining"] = self._deserialize(
- "int", response.headers.get("ratelimit-remaining")
- )
- response_headers["ratelimit-reset"] = self._deserialize(
- "int", response.headers.get("ratelimit-reset")
- )
-
- if response.status_code == 404:
- response_headers["ratelimit-limit"] = self._deserialize(
- "int", response.headers.get("ratelimit-limit")
- )
- response_headers["ratelimit-remaining"] = self._deserialize(
- "int", response.headers.get("ratelimit-remaining")
- )
- response_headers["ratelimit-reset"] = self._deserialize(
- "int", response.headers.get("ratelimit-reset")
- )
-
- if response.content:
- deserialized = response.json()
- else:
- deserialized = None
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
if cls:
- return cls(pipeline_response, deserialized, response_headers) # type: ignore
+ return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
- return deserialized # type: ignore
+ return cast(JSON, deserialized) # type: ignore
class DomainsOperations:
@@ -136325,6 +144493,897 @@ async def delete_trigger(
return deserialized # type: ignore
+class FunctionsAccessKeyOperations:
+ """
+ .. warning::
+ **DO NOT** instantiate this class directly.
+
+ Instead, you should access the following operations through
+ :class:`~pydo.aio.GeneratedClient`'s
+ :attr:`functions_access_key` attribute.
+ """
+
+ def __init__(self, *args, **kwargs) -> None:
+ input_args = list(args)
+ self._client = input_args.pop(0) if input_args else kwargs.pop("client")
+ self._config = input_args.pop(0) if input_args else kwargs.pop("config")
+ self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
+ self._deserialize = (
+ input_args.pop(0) if input_args else kwargs.pop("deserializer")
+ )
+
+ @distributed_trace_async
+ async def list(self, namespace_id: str, **kwargs: Any) -> JSON:
+ # pylint: disable=line-too-long
+ """List Namespace Access Keys.
+
+ Lists all access keys for a serverless functions namespace.
+
+ To list access keys, send a GET request to ``/v2/functions/namespaces/{namespace_id}/keys``.
+
+ :param namespace_id: The ID of the namespace to be managed. Required.
+ :type namespace_id: str
+ :return: JSON object
+ :rtype: JSON
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # response body for status code(s): 200
+ response == {
+ "access_keys": [
+ {
+ "created_at": "2020-02-20 00:00:00", # Optional. The date
+ and time the key was created.
+ "expires_at": "2020-02-20 00:00:00", # Optional. When the
+ key expires (null for non-expiring keys).
+ "id": "str", # Optional. The access key's unique identifier
+ with prefix 'dof"" *v1*"" '.
+ "name": "str", # Optional. The access key's name.
+ "updated_at": "2020-02-20 00:00:00" # Optional. The date and
+ time the key was last updated.
+ }
+ ],
+ "count": 0 # Optional. Total number of access keys.
+ }
+ # response body for status code(s): 404
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
+ """
+ error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ 401: cast(
+ Type[HttpResponseError],
+ lambda response: ClientAuthenticationError(response=response),
+ ),
+ 429: HttpResponseError,
+ 500: HttpResponseError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = kwargs.pop("params", {}) or {}
+
+ cls: ClsType[JSON] = kwargs.pop("cls", None)
+
+ _request = build_functions_access_key_list_request(
+ namespace_id=namespace_id,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = (
+ await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 404]:
+ if _stream:
+ await response.read() # Load the body in memory and close the socket
+ map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
+ raise HttpResponseError(response=response)
+
+ response_headers = {}
+ if response.status_code == 200:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if response.status_code == 404:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if cls:
+ return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
+
+ return cast(JSON, deserialized) # type: ignore
+
+ @overload
+ async def create(
+ self,
+ namespace_id: str,
+ body: JSON,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> JSON:
+ # pylint: disable=line-too-long
+ """Create a Namespace Access Key.
+
+ Creates a new access key for a serverless functions namespace.
+ The access key can be used to authenticate requests to the namespace's functions.
+ The secret key is only returned once upon creation.
+
+ To create an access key, send a POST request to
+ ``/v2/functions/namespaces/{namespace_id}/keys``.
+
+ :param namespace_id: The ID of the namespace to be managed. Required.
+ :type namespace_id: str
+ :param body: Required.
+ :type body: JSON
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: JSON object
+ :rtype: JSON
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # JSON input template you can fill out and use as your body input.
+ body = {
+ "name": "str", # The access key's name. Required.
+ "expires_in": "str" # Optional. The duration after which the access key
+ expires, specified as a human-readable duration string in the format ``h``
+ (hours) or ``d`` (days). Minimum value is ``1h``. If omitted, the key will
+ never expire.
+ }
+
+ # response body for status code(s): 201
+ response == {
+ "access_key": {
+ "created_at": "2020-02-20 00:00:00", # Optional. The date and time
+ the key was created.
+ "expires_at": "2020-02-20 00:00:00", # Optional. When the key
+ expires (null for non-expiring keys).
+ "id": "str", # Optional. The access key's unique identifier with
+ prefix 'dof"" *v1*"" '.
+ "name": "str", # Optional. The access key's name.
+ "secret": "str", # Optional. The secret key used to authenticate.
+ This is only returned once upon creation. Make sure to copy and securely
+ store it.
+ "updated_at": "2020-02-20 00:00:00" # Optional. The date and time
+ the key was last updated.
+ }
+ }
+ # response body for status code(s): 400, 404, 409
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
+ """
+
+ @overload
+ async def create(
+ self,
+ namespace_id: str,
+ body: IO[bytes],
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> JSON:
+ # pylint: disable=line-too-long
+ """Create a Namespace Access Key.
+
+ Creates a new access key for a serverless functions namespace.
+ The access key can be used to authenticate requests to the namespace's functions.
+ The secret key is only returned once upon creation.
+
+ To create an access key, send a POST request to
+ ``/v2/functions/namespaces/{namespace_id}/keys``.
+
+ :param namespace_id: The ID of the namespace to be managed. Required.
+ :type namespace_id: str
+ :param body: Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: JSON object
+ :rtype: JSON
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # response body for status code(s): 201
+ response == {
+ "access_key": {
+ "created_at": "2020-02-20 00:00:00", # Optional. The date and time
+ the key was created.
+ "expires_at": "2020-02-20 00:00:00", # Optional. When the key
+ expires (null for non-expiring keys).
+ "id": "str", # Optional. The access key's unique identifier with
+ prefix 'dof"" *v1*"" '.
+ "name": "str", # Optional. The access key's name.
+ "secret": "str", # Optional. The secret key used to authenticate.
+ This is only returned once upon creation. Make sure to copy and securely
+ store it.
+ "updated_at": "2020-02-20 00:00:00" # Optional. The date and time
+ the key was last updated.
+ }
+ }
+ # response body for status code(s): 400, 404, 409
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
+ """
+
+ @distributed_trace_async
+ async def create(
+ self, namespace_id: str, body: Union[JSON, IO[bytes]], **kwargs: Any
+ ) -> JSON:
+ # pylint: disable=line-too-long
+ """Create a Namespace Access Key.
+
+ Creates a new access key for a serverless functions namespace.
+ The access key can be used to authenticate requests to the namespace's functions.
+ The secret key is only returned once upon creation.
+
+ To create an access key, send a POST request to
+ ``/v2/functions/namespaces/{namespace_id}/keys``.
+
+ :param namespace_id: The ID of the namespace to be managed. Required.
+ :type namespace_id: str
+ :param body: Is either a JSON type or a IO[bytes] type. Required.
+ :type body: JSON or IO[bytes]
+ :return: JSON object
+ :rtype: JSON
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # JSON input template you can fill out and use as your body input.
+ body = {
+ "name": "str", # The access key's name. Required.
+ "expires_in": "str" # Optional. The duration after which the access key
+ expires, specified as a human-readable duration string in the format ``h``
+ (hours) or ``d`` (days). Minimum value is ``1h``. If omitted, the key will
+ never expire.
+ }
+
+ # response body for status code(s): 201
+ response == {
+ "access_key": {
+ "created_at": "2020-02-20 00:00:00", # Optional. The date and time
+ the key was created.
+ "expires_at": "2020-02-20 00:00:00", # Optional. When the key
+ expires (null for non-expiring keys).
+ "id": "str", # Optional. The access key's unique identifier with
+ prefix 'dof"" *v1*"" '.
+ "name": "str", # Optional. The access key's name.
+ "secret": "str", # Optional. The secret key used to authenticate.
+ This is only returned once upon creation. Make sure to copy and securely
+ store it.
+ "updated_at": "2020-02-20 00:00:00" # Optional. The date and time
+ the key was last updated.
+ }
+ }
+ # response body for status code(s): 400, 404, 409
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
+ """
+ error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ 401: cast(
+ Type[HttpResponseError],
+ lambda response: ClientAuthenticationError(response=response),
+ ),
+ 429: HttpResponseError,
+ 500: HttpResponseError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = kwargs.pop("params", {}) or {}
+
+ content_type: Optional[str] = kwargs.pop(
+ "content_type", _headers.pop("Content-Type", None)
+ )
+ cls: ClsType[JSON] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = body
+
+ _request = build_functions_access_key_create_request(
+ namespace_id=namespace_id,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = (
+ await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [201, 400, 404, 409]:
+ if _stream:
+ await response.read() # Load the body in memory and close the socket
+ map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
+ raise HttpResponseError(response=response)
+
+ response_headers = {}
+ if response.status_code == 201:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if response.status_code == 400:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if response.status_code == 404:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if response.status_code == 409:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if cls:
+ return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
+
+ return cast(JSON, deserialized) # type: ignore
+
+ @overload
+ async def update(
+ self,
+ namespace_id: str,
+ key_id: str,
+ body: JSON,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> JSON:
+ # pylint: disable=line-too-long
+ """Update a Namespace Access Key.
+
+ Updates the name of an access key for a serverless functions namespace.
+
+ To update an access key, send a PUT request to
+ ``/v2/functions/namespaces/{namespace_id}/keys/{key_id}``.
+
+ :param namespace_id: The ID of the namespace to be managed. Required.
+ :type namespace_id: str
+ :param key_id: The ID of the access key to be managed. Required.
+ :type key_id: str
+ :param body: Required.
+ :type body: JSON
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: JSON object
+ :rtype: JSON
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # JSON input template you can fill out and use as your body input.
+ body = {
+ "name": "str" # The new name for the access key. Required.
+ }
+
+ # response body for status code(s): 200
+ response == {
+ "access_key": {
+ "created_at": "2020-02-20 00:00:00", # Optional. The date and time
+ the key was created.
+ "expires_at": "2020-02-20 00:00:00", # Optional. When the key
+ expires (null for non-expiring keys).
+ "id": "str", # Optional. The access key's unique identifier with
+ prefix 'dof"" *v1*"" '.
+ "name": "str", # Optional. The access key's name.
+ "updated_at": "2020-02-20 00:00:00" # Optional. The date and time
+ the key was last updated.
+ }
+ }
+ # response body for status code(s): 400, 404, 409
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
+ """
+
+ @overload
+ async def update(
+ self,
+ namespace_id: str,
+ key_id: str,
+ body: IO[bytes],
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> JSON:
+ # pylint: disable=line-too-long
+ """Update a Namespace Access Key.
+
+ Updates the name of an access key for a serverless functions namespace.
+
+ To update an access key, send a PUT request to
+ ``/v2/functions/namespaces/{namespace_id}/keys/{key_id}``.
+
+ :param namespace_id: The ID of the namespace to be managed. Required.
+ :type namespace_id: str
+ :param key_id: The ID of the access key to be managed. Required.
+ :type key_id: str
+ :param body: Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: JSON object
+ :rtype: JSON
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # response body for status code(s): 200
+ response == {
+ "access_key": {
+ "created_at": "2020-02-20 00:00:00", # Optional. The date and time
+ the key was created.
+ "expires_at": "2020-02-20 00:00:00", # Optional. When the key
+ expires (null for non-expiring keys).
+ "id": "str", # Optional. The access key's unique identifier with
+ prefix 'dof"" *v1*"" '.
+ "name": "str", # Optional. The access key's name.
+ "updated_at": "2020-02-20 00:00:00" # Optional. The date and time
+ the key was last updated.
+ }
+ }
+ # response body for status code(s): 400, 404, 409
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
+ """
+
+ @distributed_trace_async
+ async def update(
+ self,
+ namespace_id: str,
+ key_id: str,
+ body: Union[JSON, IO[bytes]],
+ **kwargs: Any
+ ) -> JSON:
+ # pylint: disable=line-too-long
+ """Update a Namespace Access Key.
+
+ Updates the name of an access key for a serverless functions namespace.
+
+ To update an access key, send a PUT request to
+ ``/v2/functions/namespaces/{namespace_id}/keys/{key_id}``.
+
+ :param namespace_id: The ID of the namespace to be managed. Required.
+ :type namespace_id: str
+ :param key_id: The ID of the access key to be managed. Required.
+ :type key_id: str
+ :param body: Is either a JSON type or a IO[bytes] type. Required.
+ :type body: JSON or IO[bytes]
+ :return: JSON object
+ :rtype: JSON
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # JSON input template you can fill out and use as your body input.
+ body = {
+ "name": "str" # The new name for the access key. Required.
+ }
+
+ # response body for status code(s): 200
+ response == {
+ "access_key": {
+ "created_at": "2020-02-20 00:00:00", # Optional. The date and time
+ the key was created.
+ "expires_at": "2020-02-20 00:00:00", # Optional. When the key
+ expires (null for non-expiring keys).
+ "id": "str", # Optional. The access key's unique identifier with
+ prefix 'dof"" *v1*"" '.
+ "name": "str", # Optional. The access key's name.
+ "updated_at": "2020-02-20 00:00:00" # Optional. The date and time
+ the key was last updated.
+ }
+ }
+ # response body for status code(s): 400, 404, 409
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
+ """
+ error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ 401: cast(
+ Type[HttpResponseError],
+ lambda response: ClientAuthenticationError(response=response),
+ ),
+ 429: HttpResponseError,
+ 500: HttpResponseError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = kwargs.pop("params", {}) or {}
+
+ content_type: Optional[str] = kwargs.pop(
+ "content_type", _headers.pop("Content-Type", None)
+ )
+ cls: ClsType[JSON] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = body
+
+ _request = build_functions_access_key_update_request(
+ namespace_id=namespace_id,
+ key_id=key_id,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = (
+ await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 400, 404, 409]:
+ if _stream:
+ await response.read() # Load the body in memory and close the socket
+ map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
+ raise HttpResponseError(response=response)
+
+ response_headers = {}
+ if response.status_code == 200:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if response.status_code == 400:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if response.status_code == 404:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if response.status_code == 409:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if cls:
+ return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
+
+ return cast(JSON, deserialized) # type: ignore
+
+ @distributed_trace_async
+ async def delete(self, namespace_id: str, key_id: str, **kwargs: Any) -> JSON:
+ # pylint: disable=line-too-long
+ """Delete a Namespace Access Key.
+
+ Deletes an access key for a serverless functions namespace.
+
+ To delete an access key, send a DELETE request to
+ ``/v2/functions/namespaces/{namespace_id}/keys/{key_id}``.
+
+ :param namespace_id: The ID of the namespace to be managed. Required.
+ :type namespace_id: str
+ :param key_id: The ID of the access key to be managed. Required.
+ :type key_id: str
+        :return: JSON object
+ :rtype: JSON
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # response body for status code(s): 404
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
+ """
+ error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ 401: cast(
+ Type[HttpResponseError],
+ lambda response: ClientAuthenticationError(response=response),
+ ),
+ 429: HttpResponseError,
+ 500: HttpResponseError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = kwargs.pop("params", {}) or {}
+
+ cls: ClsType[JSON] = kwargs.pop("cls", None)
+
+ _request = build_functions_access_key_delete_request(
+ namespace_id=namespace_id,
+ key_id=key_id,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = (
+ await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 404]:
+ if _stream:
+ await response.read() # Load the body in memory and close the socket
+ map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
+ raise HttpResponseError(response=response)
+
+ response_headers = {}
+ if response.status_code == 200:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if response.status_code == 404:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if cls:
+ return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
+
+ return cast(JSON, deserialized) # type: ignore
+
+
class ImagesOperations:
"""
.. warning::
@@ -169341,88 +178400,1004 @@ async def create(
To create your container registry, send a POST request to ``/v2/registry``.
- The ``name`` becomes part of the URL for images stored in the registry. For
- example, if your registry is called ``example``\\ , an image in it will have the
- URL ``registry.digitalocean.com/example/image:tag``.
+ The ``name`` becomes part of the URL for images stored in the registry. For
+ example, if your registry is called ``example``\\ , an image in it will have the
+ URL ``registry.digitalocean.com/example/image:tag``.
+
+ :param body: Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: JSON object
+ :rtype: JSON
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # response body for status code(s): 201
+ response == {
+ "registry": {
+ "created_at": "2020-02-20 00:00:00", # Optional. A time value given
+ in ISO8601 combined date and time format that represents when the registry
+ was created.
+ "name": "str", # Optional. A globally unique name for the container
+ registry. Must be lowercase and be composed only of numbers, letters and
+                    ``-``, up to a limit of 63 characters.
+ "region": "str", # Optional. Slug of the region where registry data
+ is stored.
+ "storage_usage_bytes": 0, # Optional. The amount of storage used in
+ the registry in bytes.
+ "storage_usage_bytes_updated_at": "2020-02-20 00:00:00", # Optional.
+ The time at which the storage usage was updated. Storage usage is calculated
+ asynchronously, and may not immediately reflect pushes to the registry.
+ "subscription": {
+ "created_at": "2020-02-20 00:00:00", # Optional. The time at
+ which the subscription was created.
+ "tier": {
+ "allow_storage_overage": bool, # Optional. A boolean
+ indicating whether the subscription tier supports additional storage
+ above what is included in the base plan at an additional cost per GiB
+ used.
+ "included_bandwidth_bytes": 0, # Optional. The
+ amount of outbound data transfer included in the subscription tier in
+ bytes.
+ "included_repositories": 0, # Optional. The number
+ of repositories included in the subscription tier. ``0`` indicates
+ that the subscription tier includes unlimited repositories.
+ "included_storage_bytes": 0, # Optional. The amount
+ of storage included in the subscription tier in bytes.
+ "monthly_price_in_cents": 0, # Optional. The monthly
+ cost of the subscription tier in cents.
+ "name": "str", # Optional. The name of the
+ subscription tier.
+ "slug": "str", # Optional. The slug identifier of
+ the subscription tier.
+ "storage_overage_price_in_cents": 0 # Optional. The
+ price paid in cents per GiB for additional storage beyond what is
+ included in the subscription plan.
+ },
+ "updated_at": "2020-02-20 00:00:00" # Optional. The time at
+ which the subscription was last updated.
+ }
+ }
+ }
+ """
+
+ @distributed_trace_async
+ async def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON:
+ # pylint: disable=line-too-long
+ """Create Container Registry.
+
+ **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.**
+
+ To create your container registry, send a POST request to ``/v2/registry``.
+
+ The ``name`` becomes part of the URL for images stored in the registry. For
+ example, if your registry is called ``example``\\ , an image in it will have the
+ URL ``registry.digitalocean.com/example/image:tag``.
+
+ :param body: Is either a JSON type or a IO[bytes] type. Required.
+ :type body: JSON or IO[bytes]
+ :return: JSON object
+ :rtype: JSON
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # JSON input template you can fill out and use as your body input.
+ body = {
+ "name": "str", # A globally unique name for the container registry. Must be
+                lowercase and be composed only of numbers, letters and ``-``, up to a limit of
+ 63 characters. Required.
+ "subscription_tier_slug": "str", # The slug of the subscription tier to sign
+ up for. Valid values can be retrieved using the options endpoint. Required. Known
+ values are: "starter", "basic", and "professional".
+ "region": "str" # Optional. Slug of the region where registry data is
+ stored. When not provided, a region will be selected. Known values are: "nyc3",
+ "sfo3", "ams3", "sgp1", and "fra1".
+ }
+
+ # response body for status code(s): 201
+ response == {
+ "registry": {
+ "created_at": "2020-02-20 00:00:00", # Optional. A time value given
+ in ISO8601 combined date and time format that represents when the registry
+ was created.
+ "name": "str", # Optional. A globally unique name for the container
+ registry. Must be lowercase and be composed only of numbers, letters and
+                    ``-``, up to a limit of 63 characters.
+ "region": "str", # Optional. Slug of the region where registry data
+ is stored.
+ "storage_usage_bytes": 0, # Optional. The amount of storage used in
+ the registry in bytes.
+ "storage_usage_bytes_updated_at": "2020-02-20 00:00:00", # Optional.
+ The time at which the storage usage was updated. Storage usage is calculated
+ asynchronously, and may not immediately reflect pushes to the registry.
+ "subscription": {
+ "created_at": "2020-02-20 00:00:00", # Optional. The time at
+ which the subscription was created.
+ "tier": {
+ "allow_storage_overage": bool, # Optional. A boolean
+ indicating whether the subscription tier supports additional storage
+ above what is included in the base plan at an additional cost per GiB
+ used.
+ "included_bandwidth_bytes": 0, # Optional. The
+ amount of outbound data transfer included in the subscription tier in
+ bytes.
+ "included_repositories": 0, # Optional. The number
+ of repositories included in the subscription tier. ``0`` indicates
+ that the subscription tier includes unlimited repositories.
+ "included_storage_bytes": 0, # Optional. The amount
+ of storage included in the subscription tier in bytes.
+ "monthly_price_in_cents": 0, # Optional. The monthly
+ cost of the subscription tier in cents.
+ "name": "str", # Optional. The name of the
+ subscription tier.
+ "slug": "str", # Optional. The slug identifier of
+ the subscription tier.
+ "storage_overage_price_in_cents": 0 # Optional. The
+ price paid in cents per GiB for additional storage beyond what is
+ included in the subscription plan.
+ },
+ "updated_at": "2020-02-20 00:00:00" # Optional. The time at
+ which the subscription was last updated.
+ }
+ }
+ }
+ """
+ error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ 401: cast(
+ Type[HttpResponseError],
+ lambda response: ClientAuthenticationError(response=response),
+ ),
+ 429: HttpResponseError,
+ 500: HttpResponseError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = kwargs.pop("params", {}) or {}
+
+ content_type: Optional[str] = kwargs.pop(
+ "content_type", _headers.pop("Content-Type", None)
+ )
+ cls: ClsType[JSON] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = body
+
+ _request = build_registry_create_request(
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = (
+ await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [201]:
+ if _stream:
+ await response.read() # Load the body in memory and close the socket
+ map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
+ raise HttpResponseError(response=response)
+
+ response_headers = {}
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if cls:
+ return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
+
+ return cast(JSON, deserialized) # type: ignore
+
+ @distributed_trace_async
+ async def delete(self, **kwargs: Any) -> Optional[JSON]:
+ # pylint: disable=line-too-long
+ """Delete Container Registry.
+
+ **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.**
+
+ To delete your container registry, destroying all container image
+ data stored in it, send a DELETE request to ``/v2/registry``.
+
+ This operation is not compatible with multiple registries in a DO account. You should use
+ ``/v2/registries/{registry_name}`` instead.
+
+ :return: JSON object or None
+ :rtype: JSON or None
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # response body for status code(s): 404, 412
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
+ """
+ error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ 401: cast(
+ Type[HttpResponseError],
+ lambda response: ClientAuthenticationError(response=response),
+ ),
+ 429: HttpResponseError,
+ 500: HttpResponseError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = kwargs.pop("params", {}) or {}
+
+ cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None)
+
+ _request = build_registry_delete_request(
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = (
+ await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [204, 404, 412]:
+ if _stream:
+ await response.read() # Load the body in memory and close the socket
+ map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
+ raise HttpResponseError(response=response)
+
+ deserialized = None
+ response_headers = {}
+ if response.status_code == 204:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.status_code == 404:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if response.status_code == 412:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @distributed_trace_async
+ async def get_subscription(self, **kwargs: Any) -> JSON:
+ # pylint: disable=line-too-long
+ """Get Subscription.
+
+ **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.**
+
+ A subscription is automatically created when you configure your
+ container registry. To get information about your subscription, send a GET
+ request to ``/v2/registry/subscription``.
+
+ :return: JSON object
+ :rtype: JSON
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # response body for status code(s): 200
+ response == {
+ "subscription": {
+ "created_at": "2020-02-20 00:00:00", # Optional. The time at which
+ the subscription was created.
+ "tier": {
+ "allow_storage_overage": bool, # Optional. A boolean
+ indicating whether the subscription tier supports additional storage
+ above what is included in the base plan at an additional cost per GiB
+ used.
+ "included_bandwidth_bytes": 0, # Optional. The amount of
+ outbound data transfer included in the subscription tier in bytes.
+ "included_repositories": 0, # Optional. The number of
+ repositories included in the subscription tier. ``0`` indicates that the
+ subscription tier includes unlimited repositories.
+ "included_storage_bytes": 0, # Optional. The amount of
+ storage included in the subscription tier in bytes.
+ "monthly_price_in_cents": 0, # Optional. The monthly cost of
+ the subscription tier in cents.
+ "name": "str", # Optional. The name of the subscription
+ tier.
+ "slug": "str", # Optional. The slug identifier of the
+ subscription tier.
+ "storage_overage_price_in_cents": 0 # Optional. The price
+ paid in cents per GiB for additional storage beyond what is included in
+ the subscription plan.
+ },
+ "updated_at": "2020-02-20 00:00:00" # Optional. The time at which
+ the subscription was last updated.
+ }
+ }
+ """
+ error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ 401: cast(
+ Type[HttpResponseError],
+ lambda response: ClientAuthenticationError(response=response),
+ ),
+ 429: HttpResponseError,
+ 500: HttpResponseError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = kwargs.pop("params", {}) or {}
+
+ cls: ClsType[JSON] = kwargs.pop("cls", None)
+
+ _request = build_registry_get_subscription_request(
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = (
+ await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ if _stream:
+ await response.read() # Load the body in memory and close the socket
+ map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
+ raise HttpResponseError(response=response)
+
+ response_headers = {}
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if cls:
+ return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
+
+ return cast(JSON, deserialized) # type: ignore
+
+ @overload
+ async def update_subscription(
+ self,
+ body: Optional[JSON] = None,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> JSON:
+ # pylint: disable=line-too-long
+ """Update Subscription Tier.
+
+ **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.**
+
+ After creating your registry, you can switch to a different
+ subscription tier to better suit your needs. To do this, send a POST request
+ to ``/v2/registry/subscription``.
+
+ :param body: Default value is None.
+ :type body: JSON
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: JSON object
+ :rtype: JSON
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # JSON input template you can fill out and use as your body input.
+ body = {
+ "tier_slug": "str" # Optional. The slug of the subscription tier to sign up
+ for. Known values are: "starter", "basic", and "professional".
+ }
+
+ # response body for status code(s): 200
+ response == {
+ "subscription": {
+ "created_at": "2020-02-20 00:00:00", # Optional. The time at which
+ the subscription was created.
+ "tier": {
+ "allow_storage_overage": bool, # Optional. A boolean
+ indicating whether the subscription tier supports additional storage
+ above what is included in the base plan at an additional cost per GiB
+ used.
+ "included_bandwidth_bytes": 0, # Optional. The amount of
+ outbound data transfer included in the subscription tier in bytes.
+ "included_repositories": 0, # Optional. The number of
+ repositories included in the subscription tier. ``0`` indicates that the
+ subscription tier includes unlimited repositories.
+ "included_storage_bytes": 0, # Optional. The amount of
+ storage included in the subscription tier in bytes.
+ "monthly_price_in_cents": 0, # Optional. The monthly cost of
+ the subscription tier in cents.
+ "name": "str", # Optional. The name of the subscription
+ tier.
+ "slug": "str", # Optional. The slug identifier of the
+ subscription tier.
+ "storage_overage_price_in_cents": 0 # Optional. The price
+ paid in cents per GiB for additional storage beyond what is included in
+ the subscription plan.
+ },
+ "updated_at": "2020-02-20 00:00:00" # Optional. The time at which
+ the subscription was last updated.
+ }
+ }
+ # response body for status code(s): 412
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
+ """
+
+ @overload
+ async def update_subscription(
+ self,
+ body: Optional[IO[bytes]] = None,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> JSON:
+ # pylint: disable=line-too-long
+ """Update Subscription Tier.
+
+ **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.**
+
+ After creating your registry, you can switch to a different
+ subscription tier to better suit your needs. To do this, send a POST request
+ to ``/v2/registry/subscription``.
+
+ :param body: Default value is None.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: JSON object
+ :rtype: JSON
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # response body for status code(s): 200
+ response == {
+ "subscription": {
+ "created_at": "2020-02-20 00:00:00", # Optional. The time at which
+ the subscription was created.
+ "tier": {
+ "allow_storage_overage": bool, # Optional. A boolean
+ indicating whether the subscription tier supports additional storage
+ above what is included in the base plan at an additional cost per GiB
+ used.
+ "included_bandwidth_bytes": 0, # Optional. The amount of
+ outbound data transfer included in the subscription tier in bytes.
+ "included_repositories": 0, # Optional. The number of
+ repositories included in the subscription tier. ``0`` indicates that the
+ subscription tier includes unlimited repositories.
+ "included_storage_bytes": 0, # Optional. The amount of
+ storage included in the subscription tier in bytes.
+ "monthly_price_in_cents": 0, # Optional. The monthly cost of
+ the subscription tier in cents.
+ "name": "str", # Optional. The name of the subscription
+ tier.
+ "slug": "str", # Optional. The slug identifier of the
+ subscription tier.
+ "storage_overage_price_in_cents": 0 # Optional. The price
+ paid in cents per GiB for additional storage beyond what is included in
+ the subscription plan.
+ },
+ "updated_at": "2020-02-20 00:00:00" # Optional. The time at which
+ the subscription was last updated.
+ }
+ }
+ # response body for status code(s): 412
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
+ """
+
+ @distributed_trace_async
+ async def update_subscription(
+ self, body: Optional[Union[JSON, IO[bytes]]] = None, **kwargs: Any
+ ) -> JSON:
+ # pylint: disable=line-too-long
+ """Update Subscription Tier.
+
+ **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.**
+
+ After creating your registry, you can switch to a different
+ subscription tier to better suit your needs. To do this, send a POST request
+ to ``/v2/registry/subscription``.
+
+ :param body: Is either a JSON type or a IO[bytes] type. Default value is None.
+ :type body: JSON or IO[bytes]
+ :return: JSON object
+ :rtype: JSON
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # JSON input template you can fill out and use as your body input.
+ body = {
+ "tier_slug": "str" # Optional. The slug of the subscription tier to sign up
+ for. Known values are: "starter", "basic", and "professional".
+ }
+
+ # response body for status code(s): 200
+ response == {
+ "subscription": {
+ "created_at": "2020-02-20 00:00:00", # Optional. The time at which
+ the subscription was created.
+ "tier": {
+ "allow_storage_overage": bool, # Optional. A boolean
+ indicating whether the subscription tier supports additional storage
+ above what is included in the base plan at an additional cost per GiB
+ used.
+ "included_bandwidth_bytes": 0, # Optional. The amount of
+ outbound data transfer included in the subscription tier in bytes.
+ "included_repositories": 0, # Optional. The number of
+ repositories included in the subscription tier. ``0`` indicates that the
+ subscription tier includes unlimited repositories.
+ "included_storage_bytes": 0, # Optional. The amount of
+ storage included in the subscription tier in bytes.
+ "monthly_price_in_cents": 0, # Optional. The monthly cost of
+ the subscription tier in cents.
+ "name": "str", # Optional. The name of the subscription
+ tier.
+ "slug": "str", # Optional. The slug identifier of the
+ subscription tier.
+ "storage_overage_price_in_cents": 0 # Optional. The price
+ paid in cents per GiB for additional storage beyond what is included in
+ the subscription plan.
+ },
+ "updated_at": "2020-02-20 00:00:00" # Optional. The time at which
+ the subscription was last updated.
+ }
+ }
+ # response body for status code(s): 412
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
+ """
+ error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ 401: cast(
+ Type[HttpResponseError],
+ lambda response: ClientAuthenticationError(response=response),
+ ),
+ 429: HttpResponseError,
+ 500: HttpResponseError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = kwargs.pop("params", {}) or {}
+
+ content_type: Optional[str] = kwargs.pop(
+ "content_type", _headers.pop("Content-Type", None)
+ )
+ cls: ClsType[JSON] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ if body is not None:
+ _json = body
+ else:
+ _json = None
+
+ _request = build_registry_update_subscription_request(
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = (
+ await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 412]:
+ if _stream:
+ await response.read() # Load the body in memory and close the socket
+ map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
+ raise HttpResponseError(response=response)
+
+ response_headers = {}
+ if response.status_code == 200:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if response.status_code == 412:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if cls:
+ return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
+
+ return cast(JSON, deserialized) # type: ignore
+
+ @distributed_trace_async
+ async def get_docker_credentials(
+ self, *, expiry_seconds: int = 0, read_write: bool = False, **kwargs: Any
+ ) -> JSON:
+ """Get Docker Credentials for Container Registry.
+
+ **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.**
+
+ In order to access your container registry with the Docker client or from a
+ Kubernetes cluster, you will need to configure authentication. The necessary
+ JSON configuration can be retrieved by sending a GET request to
+ ``/v2/registry/docker-credentials``.
+
+ The response will be in the format of a Docker ``config.json`` file. To use the
+ config in your Kubernetes cluster, create a Secret with:
+
+ .. code-block::
+
+ kubectl create secret generic docr \\
+ --from-file=.dockerconfigjson=config.json \\
+ --type=kubernetes.io/dockerconfigjson
+
+
+ By default, the returned credentials have read-only access to your registry
+ and cannot be used to push images. This is appropriate for most Kubernetes
+ clusters. To retrieve read/write credentials, suitable for use with the Docker
+ client or in a CI system, read_write may be provided as query parameter. For
+ example: ``/v2/registry/docker-credentials?read_write=true``
+
+ By default, the returned credentials will not expire. To retrieve credentials
+ with an expiry set, expiry_seconds may be provided as a query parameter. For
+ example: ``/v2/registry/docker-credentials?expiry_seconds=3600`` will return
+ credentials that expire after one hour.
+
+ :keyword expiry_seconds: The duration in seconds that the returned registry credentials will be
+ valid. If not set or 0, the credentials will not expire. Default value is 0.
+ :paramtype expiry_seconds: int
+ :keyword read_write: By default, the registry credentials allow for read-only access. Set this
+ query parameter to ``true`` to obtain read-write credentials. Default value is False.
+ :paramtype read_write: bool
+ :return: JSON object
+ :rtype: JSON
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # response body for status code(s): 200
+ response == {
+ "auths": {
+ "registry.digitalocean.com": {
+ "auth": "str" # Optional. A base64 encoded string containing
+ credentials for the container registry.
+ }
+ }
+ }
+ """
+ error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ 401: cast(
+ Type[HttpResponseError],
+ lambda response: ClientAuthenticationError(response=response),
+ ),
+ 429: HttpResponseError,
+ 500: HttpResponseError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = kwargs.pop("params", {}) or {}
+
+ cls: ClsType[JSON] = kwargs.pop("cls", None)
+
+ _request = build_registry_get_docker_credentials_request(
+ expiry_seconds=expiry_seconds,
+ read_write=read_write,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = (
+ await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ if _stream:
+ await response.read() # Load the body in memory and close the socket
+ map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
+ raise HttpResponseError(response=response)
+
+ response_headers = {}
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if cls:
+ return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
+
+ return cast(JSON, deserialized) # type: ignore
+
+ @overload
+ async def validate_name(
+ self, body: JSON, *, content_type: str = "application/json", **kwargs: Any
+ ) -> Optional[JSON]:
+ # pylint: disable=line-too-long
+ """Validate a Container Registry Name.
+
+ **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.**
+
+ To validate that a container registry name is available for use, send a POST
+ request to ``/v2/registry/validate-name``.
+
+ If the name is both formatted correctly and available, the response code will
+ be 204 and contain no body. If the name is already in use, the response will
+ be a 409 Conflict.
+
+ :param body: Required.
+ :type body: JSON
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: JSON object or None
+ :rtype: JSON or None
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # JSON input template you can fill out and use as your body input.
+ body = {
+ "name": "str" # A globally unique name for the container registry. Must be
+ lowercase and be composed only of numbers, letters and ``-``"" , up to a limit of
+ 63 characters. Required.
+ }
+
+ # response body for status code(s): 409
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
+ """
+
+ @overload
+ async def validate_name(
+ self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
+ ) -> Optional[JSON]:
+ # pylint: disable=line-too-long
+ """Validate a Container Registry Name.
+
+ **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.**
+
+ To validate that a container registry name is available for use, send a POST
+ request to ``/v2/registry/validate-name``.
+
+ If the name is both formatted correctly and available, the response code will
+ be 204 and contain no body. If the name is already in use, the response will
+ be a 409 Conflict.
:param body: Required.
:type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :return: JSON object
- :rtype: JSON
+ :return: JSON object or None
+ :rtype: JSON or None
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
- # response body for status code(s): 201
+ # response body for status code(s): 409
response == {
- "registry": {
- "created_at": "2020-02-20 00:00:00", # Optional. A time value given
- in ISO8601 combined date and time format that represents when the registry
- was created.
- "name": "str", # Optional. A globally unique name for the container
- registry. Must be lowercase and be composed only of numbers, letters and
- ``-``"" , up to a limit of 63 characters.
- "region": "str", # Optional. Slug of the region where registry data
- is stored.
- "storage_usage_bytes": 0, # Optional. The amount of storage used in
- the registry in bytes.
- "storage_usage_bytes_updated_at": "2020-02-20 00:00:00", # Optional.
- The time at which the storage usage was updated. Storage usage is calculated
- asynchronously, and may not immediately reflect pushes to the registry.
- "subscription": {
- "created_at": "2020-02-20 00:00:00", # Optional. The time at
- which the subscription was created.
- "tier": {
- "allow_storage_overage": bool, # Optional. A boolean
- indicating whether the subscription tier supports additional storage
- above what is included in the base plan at an additional cost per GiB
- used.
- "included_bandwidth_bytes": 0, # Optional. The
- amount of outbound data transfer included in the subscription tier in
- bytes.
- "included_repositories": 0, # Optional. The number
- of repositories included in the subscription tier. ``0`` indicates
- that the subscription tier includes unlimited repositories.
- "included_storage_bytes": 0, # Optional. The amount
- of storage included in the subscription tier in bytes.
- "monthly_price_in_cents": 0, # Optional. The monthly
- cost of the subscription tier in cents.
- "name": "str", # Optional. The name of the
- subscription tier.
- "slug": "str", # Optional. The slug identifier of
- the subscription tier.
- "storage_overage_price_in_cents": 0 # Optional. The
- price paid in cents per GiB for additional storage beyond what is
- included in the subscription plan.
- },
- "updated_at": "2020-02-20 00:00:00" # Optional. The time at
- which the subscription was last updated.
- }
- }
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
}
"""
@distributed_trace_async
- async def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON:
+ async def validate_name(
+ self, body: Union[JSON, IO[bytes]], **kwargs: Any
+ ) -> Optional[JSON]:
# pylint: disable=line-too-long
- """Create Container Registry.
+ """Validate a Container Registry Name.
**Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.**
- To create your container registry, send a POST request to ``/v2/registry``.
+ To validate that a container registry name is available for use, send a POST
+ request to ``/v2/registry/validate-name``.
- The ``name`` becomes part of the URL for images stored in the registry. For
- example, if your registry is called ``example``\\ , an image in it will have the
- URL ``registry.digitalocean.com/example/image:tag``.
+ If the name is both formatted correctly and available, the response code will
+ be 204 and contain no body. If the name is already in use, the response will
+ be a 409 Conflict.
:param body: Is either a JSON type or a IO[bytes] type. Required.
:type body: JSON or IO[bytes]
- :return: JSON object
- :rtype: JSON
+ :return: JSON object or None
+ :rtype: JSON or None
:raises ~azure.core.exceptions.HttpResponseError:
Example:
@@ -169430,63 +179405,21 @@ async def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON:
# JSON input template you can fill out and use as your body input.
body = {
- "name": "str", # A globally unique name for the container registry. Must be
+ "name": "str" # A globally unique name for the container registry. Must be
lowercase and be composed only of numbers, letters and ``-``"" , up to a limit of
63 characters. Required.
- "subscription_tier_slug": "str", # The slug of the subscription tier to sign
- up for. Valid values can be retrieved using the options endpoint. Required. Known
- values are: "starter", "basic", and "professional".
- "region": "str" # Optional. Slug of the region where registry data is
- stored. When not provided, a region will be selected. Known values are: "nyc3",
- "sfo3", "ams3", "sgp1", and "fra1".
}
- # response body for status code(s): 201
+ # response body for status code(s): 409
response == {
- "registry": {
- "created_at": "2020-02-20 00:00:00", # Optional. A time value given
- in ISO8601 combined date and time format that represents when the registry
- was created.
- "name": "str", # Optional. A globally unique name for the container
- registry. Must be lowercase and be composed only of numbers, letters and
- ``-``"" , up to a limit of 63 characters.
- "region": "str", # Optional. Slug of the region where registry data
- is stored.
- "storage_usage_bytes": 0, # Optional. The amount of storage used in
- the registry in bytes.
- "storage_usage_bytes_updated_at": "2020-02-20 00:00:00", # Optional.
- The time at which the storage usage was updated. Storage usage is calculated
- asynchronously, and may not immediately reflect pushes to the registry.
- "subscription": {
- "created_at": "2020-02-20 00:00:00", # Optional. The time at
- which the subscription was created.
- "tier": {
- "allow_storage_overage": bool, # Optional. A boolean
- indicating whether the subscription tier supports additional storage
- above what is included in the base plan at an additional cost per GiB
- used.
- "included_bandwidth_bytes": 0, # Optional. The
- amount of outbound data transfer included in the subscription tier in
- bytes.
- "included_repositories": 0, # Optional. The number
- of repositories included in the subscription tier. ``0`` indicates
- that the subscription tier includes unlimited repositories.
- "included_storage_bytes": 0, # Optional. The amount
- of storage included in the subscription tier in bytes.
- "monthly_price_in_cents": 0, # Optional. The monthly
- cost of the subscription tier in cents.
- "name": "str", # Optional. The name of the
- subscription tier.
- "slug": "str", # Optional. The slug identifier of
- the subscription tier.
- "storage_overage_price_in_cents": 0 # Optional. The
- price paid in cents per GiB for additional storage beyond what is
- included in the subscription plan.
- },
- "updated_at": "2020-02-20 00:00:00" # Optional. The time at
- which the subscription was last updated.
- }
- }
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
}
"""
error_map: MutableMapping[int, Type[HttpResponseError]] = {
@@ -169508,7 +179441,7 @@ async def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON:
content_type: Optional[str] = kwargs.pop(
"content_type", _headers.pop("Content-Type", None)
)
- cls: ClsType[JSON] = kwargs.pop("cls", None)
+ cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -169518,7 +179451,7 @@ async def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON:
else:
_json = body
- _request = build_registry_create_request(
+ _request = build_registry_validate_name_request(
content_type=content_type,
json=_json,
content=_content,
@@ -169536,54 +179469,109 @@ async def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON:
response = pipeline_response.http_response
- if response.status_code not in [201]:
+ if response.status_code not in [204, 409]:
if _stream:
await response.read() # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
raise HttpResponseError(response=response)
+ deserialized = None
response_headers = {}
- response_headers["ratelimit-limit"] = self._deserialize(
- "int", response.headers.get("ratelimit-limit")
- )
- response_headers["ratelimit-remaining"] = self._deserialize(
- "int", response.headers.get("ratelimit-remaining")
- )
- response_headers["ratelimit-reset"] = self._deserialize(
- "int", response.headers.get("ratelimit-reset")
- )
+ if response.status_code == 204:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
- if response.content:
- deserialized = response.json()
- else:
- deserialized = None
+ if response.status_code == 409:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
if cls:
- return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- return cast(JSON, deserialized) # type: ignore
+ return deserialized # type: ignore
@distributed_trace_async
- async def delete(self, **kwargs: Any) -> Optional[JSON]:
+ async def list_repositories(
+ self, registry_name: str, *, per_page: int = 20, page: int = 1, **kwargs: Any
+ ) -> JSON:
# pylint: disable=line-too-long
- """Delete Container Registry.
+ """List All Container Registry Repositories.
**Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.**
- To delete your container registry, destroying all container image
- data stored in it, send a DELETE request to ``/v2/registry``.
+ This endpoint has been deprecated in favor of the *List All Container Registry Repositories
+ [V2]* endpoint.
- This operation is not compatible with multiple registries in a DO account. You should use
- ``/v2/registries/{registry_name}`` instead.
+ To list all repositories in your container registry, send a GET
+ request to ``/v2/registry/$REGISTRY_NAME/repositories``.
- :return: JSON object or None
- :rtype: JSON or None
+ :param registry_name: The name of a container registry. Required.
+ :type registry_name: str
+ :keyword per_page: Number of items returned per page. Default value is 20.
+ :paramtype per_page: int
+ :keyword page: Which 'page' of paginated results to return. Default value is 1.
+ :paramtype page: int
+ :return: JSON object
+ :rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
- # response body for status code(s): 404, 412
+ # response body for status code(s): 200
+ response == {
+ "meta": {
+ "total": 0 # Optional. Number of objects returned by the request.
+ },
+ "links": {
+ "pages": {}
+ },
+ "repositories": [
+ {
+ "latest_tag": {
+ "compressed_size_bytes": 0, # Optional. The
+ compressed size of the tag in bytes.
+ "manifest_digest": "str", # Optional. The digest of
+ the manifest associated with the tag.
+ "registry_name": "str", # Optional. The name of the
+ container registry.
+ "repository": "str", # Optional. The name of the
+ repository.
+ "size_bytes": 0, # Optional. The uncompressed size
+ of the tag in bytes (this size is calculated asynchronously so it may
+ not be immediately available).
+ "tag": "str", # Optional. The name of the tag.
+ "updated_at": "2020-02-20 00:00:00" # Optional. The
+ time the tag was last updated.
+ },
+ "name": "str", # Optional. The name of the repository.
+ "registry_name": "str", # Optional. The name of the
+ container registry.
+ "tag_count": 0 # Optional. The number of tags in the
+ repository.
+ }
+ ]
+ }
+ # response body for status code(s): 404
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
returned. For example, the ID for a response returning a 404 status code would
@@ -169611,9 +179599,12 @@ async def delete(self, **kwargs: Any) -> Optional[JSON]:
_headers = kwargs.pop("headers", {}) or {}
_params = kwargs.pop("params", {}) or {}
- cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None)
+ cls: ClsType[JSON] = kwargs.pop("cls", None)
- _request = build_registry_delete_request(
+ _request = build_registry_list_repositories_request(
+ registry_name=registry_name,
+ per_page=per_page,
+ page=page,
headers=_headers,
params=_params,
)
@@ -169628,26 +179619,14 @@ async def delete(self, **kwargs: Any) -> Optional[JSON]:
response = pipeline_response.http_response
- if response.status_code not in [204, 404, 412]:
+ if response.status_code not in [200, 404]:
if _stream:
await response.read() # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
raise HttpResponseError(response=response)
- deserialized = None
response_headers = {}
- if response.status_code == 204:
- response_headers["ratelimit-limit"] = self._deserialize(
- "int", response.headers.get("ratelimit-limit")
- )
- response_headers["ratelimit-remaining"] = self._deserialize(
- "int", response.headers.get("ratelimit-remaining")
- )
- response_headers["ratelimit-reset"] = self._deserialize(
- "int", response.headers.get("ratelimit-reset")
- )
-
- if response.status_code == 404:
+ if response.status_code == 200:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
)
@@ -169663,7 +179642,7 @@ async def delete(self, **kwargs: Any) -> Optional[JSON]:
else:
deserialized = None
- if response.status_code == 412:
+ if response.status_code == 404:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
)
@@ -169680,21 +179659,38 @@ async def delete(self, **kwargs: Any) -> Optional[JSON]:
deserialized = None
if cls:
- return cls(pipeline_response, deserialized, response_headers) # type: ignore
+ return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
- return deserialized # type: ignore
+ return cast(JSON, deserialized) # type: ignore
@distributed_trace_async
- async def get_subscription(self, **kwargs: Any) -> JSON:
+ async def list_repositories_v2(
+ self,
+ registry_name: str,
+ *,
+ per_page: int = 20,
+ page: int = 1,
+ page_token: Optional[str] = None,
+ **kwargs: Any
+ ) -> JSON:
# pylint: disable=line-too-long
- """Get Subscription.
+ """List All Container Registry Repositories (V2).
**Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.**
- A subscription is automatically created when you configure your
- container registry. To get information about your subscription, send a GET
- request to ``/v2/registry/subscription``.
+ To list all repositories in your container registry, send a GET
+ request to ``/v2/registry/$REGISTRY_NAME/repositoriesV2``.
+ :param registry_name: The name of a container registry. Required.
+ :type registry_name: str
+ :keyword per_page: Number of items returned per page. Default value is 20.
+ :paramtype per_page: int
+ :keyword page: Which 'page' of paginated results to return. Ignored when 'page_token' is
+ provided. Default value is 1.
+ :paramtype page: int
+ :keyword page_token: Token to retrieve of the next or previous set of results more quickly than
+ using 'page'. Default value is None.
+ :paramtype page_token: str
:return: JSON object
:rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
@@ -169704,34 +179700,60 @@ async def get_subscription(self, **kwargs: Any) -> JSON:
# response body for status code(s): 200
response == {
- "subscription": {
- "created_at": "2020-02-20 00:00:00", # Optional. The time at which
- the subscription was created.
- "tier": {
- "allow_storage_overage": bool, # Optional. A boolean
- indicating whether the subscription tier supports additional storage
- above what is included in the base plan at an additional cost per GiB
- used.
- "included_bandwidth_bytes": 0, # Optional. The amount of
- outbound data transfer included in the subscription tier in bytes.
- "included_repositories": 0, # Optional. The number of
- repositories included in the subscription tier. ``0`` indicates that the
- subscription tier includes unlimited repositories.
- "included_storage_bytes": 0, # Optional. The amount of
- storage included in the subscription tier in bytes.
- "monthly_price_in_cents": 0, # Optional. The monthly cost of
- the subscription tier in cents.
- "name": "str", # Optional. The name of the subscription
- tier.
- "slug": "str", # Optional. The slug identifier of the
- subscription tier.
- "storage_overage_price_in_cents": 0 # Optional. The price
- paid in cents per GiB for additional storage beyond what is included in
- the subscription plan.
- },
- "updated_at": "2020-02-20 00:00:00" # Optional. The time at which
- the subscription was last updated.
- }
+ "meta": {
+ "total": 0 # Optional. Number of objects returned by the request.
+ },
+ "links": {
+ "pages": {}
+ },
+ "repositories": [
+ {
+ "latest_manifest": {
+ "blobs": [
+ {
+ "compressed_size_bytes": 0, #
+ Optional. The compressed size of the blob in bytes.
+ "digest": "str" # Optional. The
+ digest of the blob.
+ }
+ ],
+ "compressed_size_bytes": 0, # Optional. The
+ compressed size of the manifest in bytes.
+ "digest": "str", # Optional. The manifest digest.
+ "registry_name": "str", # Optional. The name of the
+ container registry.
+ "repository": "str", # Optional. The name of the
+ repository.
+ "size_bytes": 0, # Optional. The uncompressed size
+ of the manifest in bytes (this size is calculated asynchronously so
+ it may not be immediately available).
+ "tags": [
+ "str" # Optional. All tags associated with
+ this manifest.
+ ],
+ "updated_at": "2020-02-20 00:00:00" # Optional. The
+ time the manifest was last updated.
+ },
+ "manifest_count": 0, # Optional. The number of manifests in
+ the repository.
+ "name": "str", # Optional. The name of the repository.
+ "registry_name": "str", # Optional. The name of the
+ container registry.
+ "tag_count": 0 # Optional. The number of tags in the
+ repository.
+ }
+ ]
+ }
+ # response body for status code(s): 400, 404
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
}
"""
error_map: MutableMapping[int, Type[HttpResponseError]] = {
@@ -169752,7 +179774,11 @@ async def get_subscription(self, **kwargs: Any) -> JSON:
cls: ClsType[JSON] = kwargs.pop("cls", None)
- _request = build_registry_get_subscription_request(
+ _request = build_registry_list_repositories_v2_request(
+ registry_name=registry_name,
+ per_page=per_page,
+ page=page,
+ page_token=page_token,
headers=_headers,
params=_params,
)
@@ -169767,55 +179793,98 @@ async def get_subscription(self, **kwargs: Any) -> JSON:
response = pipeline_response.http_response
- if response.status_code not in [200]:
+ if response.status_code not in [200, 400, 404]:
if _stream:
await response.read() # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
raise HttpResponseError(response=response)
response_headers = {}
- response_headers["ratelimit-limit"] = self._deserialize(
- "int", response.headers.get("ratelimit-limit")
- )
- response_headers["ratelimit-remaining"] = self._deserialize(
- "int", response.headers.get("ratelimit-remaining")
- )
- response_headers["ratelimit-reset"] = self._deserialize(
- "int", response.headers.get("ratelimit-reset")
- )
+ if response.status_code == 200:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
- if response.content:
- deserialized = response.json()
- else:
- deserialized = None
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if response.status_code == 400:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if response.status_code == 404:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
if cls:
return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
return cast(JSON, deserialized) # type: ignore
- @overload
- async def update_subscription(
+ @distributed_trace_async
+ async def list_repository_tags(
self,
- body: Optional[JSON] = None,
+ registry_name: str,
+ repository_name: str,
*,
- content_type: str = "application/json",
+ per_page: int = 20,
+ page: int = 1,
**kwargs: Any
) -> JSON:
# pylint: disable=line-too-long
- """Update Subscription Tier.
+ """List All Container Registry Repository Tags.
**Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.**
- After creating your registry, you can switch to a different
- subscription tier to better suit your needs. To do this, send a POST request
- to ``/v2/registry/subscription``.
+ To list all tags in your container registry repository, send a GET
+ request to ``/v2/registry/$REGISTRY_NAME/repositories/$REPOSITORY_NAME/tags``.
- :param body: Default value is None.
- :type body: JSON
- :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
- Default value is "application/json".
- :paramtype content_type: str
+ Note that if your repository name contains ``/`` characters, it must be
+ URL-encoded in the request URL. For example, to list tags for
+ ``registry.digitalocean.com/example/my/repo``\\ , the path would be
+ ``/v2/registry/example/repositories/my%2Frepo/tags``.
+
+ :param registry_name: The name of a container registry. Required.
+ :type registry_name: str
+ :param repository_name: The name of a container registry repository. If the name contains ``/``
+ characters, they must be URL-encoded, e.g. ``%2F``. Required.
+ :type repository_name: str
+ :keyword per_page: Number of items returned per page. Default value is 20.
+ :paramtype per_page: int
+ :keyword page: Which 'page' of paginated results to return. Default value is 1.
+ :paramtype page: int
:return: JSON object
:rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
@@ -169823,44 +179892,33 @@ async def update_subscription(
Example:
.. code-block:: python
- # JSON input template you can fill out and use as your body input.
- body = {
- "tier_slug": "str" # Optional. The slug of the subscription tier to sign up
- for. Known values are: "starter", "basic", and "professional".
- }
-
# response body for status code(s): 200
response == {
- "subscription": {
- "created_at": "2020-02-20 00:00:00", # Optional. The time at which
- the subscription was created.
- "tier": {
- "allow_storage_overage": bool, # Optional. A boolean
- indicating whether the subscription tier supports additional storage
- above what is included in the base plan at an additional cost per GiB
- used.
- "included_bandwidth_bytes": 0, # Optional. The amount of
- outbound data transfer included in the subscription tier in bytes.
- "included_repositories": 0, # Optional. The number of
- repositories included in the subscription tier. ``0`` indicates that the
- subscription tier includes unlimited repositories.
- "included_storage_bytes": 0, # Optional. The amount of
- storage included in the subscription tier in bytes.
- "monthly_price_in_cents": 0, # Optional. The monthly cost of
- the subscription tier in cents.
- "name": "str", # Optional. The name of the subscription
- tier.
- "slug": "str", # Optional. The slug identifier of the
- subscription tier.
- "storage_overage_price_in_cents": 0 # Optional. The price
- paid in cents per GiB for additional storage beyond what is included in
- the subscription plan.
- },
- "updated_at": "2020-02-20 00:00:00" # Optional. The time at which
- the subscription was last updated.
- }
+ "meta": {
+ "total": 0 # Optional. Number of objects returned by the request.
+ },
+ "links": {
+ "pages": {}
+ },
+ "tags": [
+ {
+ "compressed_size_bytes": 0, # Optional. The compressed size
+ of the tag in bytes.
+ "manifest_digest": "str", # Optional. The digest of the
+ manifest associated with the tag.
+ "registry_name": "str", # Optional. The name of the
+ container registry.
+ "repository": "str", # Optional. The name of the repository.
+ "size_bytes": 0, # Optional. The uncompressed size of the
+ tag in bytes (this size is calculated asynchronously so it may not be
+ immediately available).
+ "tag": "str", # Optional. The name of the tag.
+ "updated_at": "2020-02-20 00:00:00" # Optional. The time the
+ tag was last updated.
+ }
+ ]
}
- # response body for status code(s): 412
+ # response body for status code(s): 404
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
returned. For example, the ID for a response returning a 404 status code would
@@ -169872,68 +179930,126 @@ async def update_subscription(
tickets to help identify the issue.
}
"""
+ error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ 401: cast(
+ Type[HttpResponseError],
+ lambda response: ClientAuthenticationError(response=response),
+ ),
+ 429: HttpResponseError,
+ 500: HttpResponseError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
- @overload
- async def update_subscription(
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = kwargs.pop("params", {}) or {}
+
+ cls: ClsType[JSON] = kwargs.pop("cls", None)
+
+ _request = build_registry_list_repository_tags_request(
+ registry_name=registry_name,
+ repository_name=repository_name,
+ per_page=per_page,
+ page=page,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = (
+ await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 404]:
+ if _stream:
+ await response.read() # Load the body in memory and close the socket
+ map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
+ raise HttpResponseError(response=response)
+
+ response_headers = {}
+ if response.status_code == 200:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if response.status_code == 404:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if cls:
+ return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
+
+ return cast(JSON, deserialized) # type: ignore
+
+ @distributed_trace_async
+ async def delete_repository_tag(
self,
- body: Optional[IO[bytes]] = None,
- *,
- content_type: str = "application/json",
+ registry_name: str,
+ repository_name: str,
+ repository_tag: str,
**kwargs: Any
- ) -> JSON:
+ ) -> Optional[JSON]:
# pylint: disable=line-too-long
- """Update Subscription Tier.
+ """Delete Container Registry Repository Tag.
**Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.**
- After creating your registry, you can switch to a different
- subscription tier to better suit your needs. To do this, send a POST request
- to ``/v2/registry/subscription``.
+ To delete a container repository tag, send a DELETE request to
+ ``/v2/registry/$REGISTRY_NAME/repositories/$REPOSITORY_NAME/tags/$TAG``.
- :param body: Default value is None.
- :type body: IO[bytes]
- :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
- Default value is "application/json".
- :paramtype content_type: str
- :return: JSON object
- :rtype: JSON
+ Note that if your repository name contains ``/`` characters, it must be
+ URL-encoded in the request URL. For example, to delete
+ ``registry.digitalocean.com/example/my/repo:mytag``\\ , the path would be
+ ``/v2/registry/example/repositories/my%2Frepo/tags/mytag``.
+
+ A successful request will receive a 204 status code with no body in response.
+ This indicates that the request was processed successfully.
+
+ :param registry_name: The name of a container registry. Required.
+ :type registry_name: str
+ :param repository_name: The name of a container registry repository. If the name contains ``/``
+ characters, they must be URL-encoded, e.g. ``%2F``. Required.
+ :type repository_name: str
+ :param repository_tag: The name of a container registry repository tag. Required.
+ :type repository_tag: str
+ :return: JSON object or None
+ :rtype: JSON or None
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
- # response body for status code(s): 200
- response == {
- "subscription": {
- "created_at": "2020-02-20 00:00:00", # Optional. The time at which
- the subscription was created.
- "tier": {
- "allow_storage_overage": bool, # Optional. A boolean
- indicating whether the subscription tier supports additional storage
- above what is included in the base plan at an additional cost per GiB
- used.
- "included_bandwidth_bytes": 0, # Optional. The amount of
- outbound data transfer included in the subscription tier in bytes.
- "included_repositories": 0, # Optional. The number of
- repositories included in the subscription tier. ``0`` indicates that the
- subscription tier includes unlimited repositories.
- "included_storage_bytes": 0, # Optional. The amount of
- storage included in the subscription tier in bytes.
- "monthly_price_in_cents": 0, # Optional. The monthly cost of
- the subscription tier in cents.
- "name": "str", # Optional. The name of the subscription
- tier.
- "slug": "str", # Optional. The slug identifier of the
- subscription tier.
- "storage_overage_price_in_cents": 0 # Optional. The price
- paid in cents per GiB for additional storage beyond what is included in
- the subscription plan.
- },
- "updated_at": "2020-02-20 00:00:00" # Optional. The time at which
- the subscription was last updated.
- }
- }
- # response body for status code(s): 412
+ # response body for status code(s): 404
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
returned. For example, the ID for a response returning a 404 status code would
@@ -169945,22 +180061,114 @@ async def update_subscription(
tickets to help identify the issue.
}
"""
+ error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ 401: cast(
+ Type[HttpResponseError],
+ lambda response: ClientAuthenticationError(response=response),
+ ),
+ 429: HttpResponseError,
+ 500: HttpResponseError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = kwargs.pop("params", {}) or {}
+
+ cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None)
+
+ _request = build_registry_delete_repository_tag_request(
+ registry_name=registry_name,
+ repository_name=repository_name,
+ repository_tag=repository_tag,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = (
+ await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [204, 404]:
+ if _stream:
+ await response.read() # Load the body in memory and close the socket
+ map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
+ raise HttpResponseError(response=response)
+
+ deserialized = None
+ response_headers = {}
+ if response.status_code == 204:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.status_code == 404:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
@distributed_trace_async
- async def update_subscription(
- self, body: Optional[Union[JSON, IO[bytes]]] = None, **kwargs: Any
+ async def list_repository_manifests(
+ self,
+ registry_name: str,
+ repository_name: str,
+ *,
+ per_page: int = 20,
+ page: int = 1,
+ **kwargs: Any
) -> JSON:
# pylint: disable=line-too-long
- """Update Subscription Tier.
+ """List All Container Registry Repository Manifests.
**Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.**
- After creating your registry, you can switch to a different
- subscription tier to better suit your needs. To do this, send a POST request
- to ``/v2/registry/subscription``.
+ To list all manifests in your container registry repository, send a GET
+ request to ``/v2/registry/$REGISTRY_NAME/repositories/$REPOSITORY_NAME/digests``.
- :param body: Is either a JSON type or a IO[bytes] type. Default value is None.
- :type body: JSON or IO[bytes]
+ Note that if your repository name contains ``/`` characters, it must be
+ URL-encoded in the request URL. For example, to list manifests for
+ ``registry.digitalocean.com/example/my/repo``\\ , the path would be
+ ``/v2/registry/example/repositories/my%2Frepo/digests``.
+
+ :param registry_name: The name of a container registry. Required.
+ :type registry_name: str
+ :param repository_name: The name of a container registry repository. If the name contains ``/``
+ characters, they must be URL-encoded, e.g. ``%2F``. Required.
+ :type repository_name: str
+ :keyword per_page: Number of items returned per page. Default value is 20.
+ :paramtype per_page: int
+ :keyword page: Which 'page' of paginated results to return. Default value is 1.
+ :paramtype page: int
:return: JSON object
:rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
@@ -169968,44 +180176,43 @@ async def update_subscription(
Example:
.. code-block:: python
- # JSON input template you can fill out and use as your body input.
- body = {
- "tier_slug": "str" # Optional. The slug of the subscription tier to sign up
- for. Known values are: "starter", "basic", and "professional".
- }
-
# response body for status code(s): 200
response == {
- "subscription": {
- "created_at": "2020-02-20 00:00:00", # Optional. The time at which
- the subscription was created.
- "tier": {
- "allow_storage_overage": bool, # Optional. A boolean
- indicating whether the subscription tier supports additional storage
- above what is included in the base plan at an additional cost per GiB
- used.
- "included_bandwidth_bytes": 0, # Optional. The amount of
- outbound data transfer included in the subscription tier in bytes.
- "included_repositories": 0, # Optional. The number of
- repositories included in the subscription tier. ``0`` indicates that the
- subscription tier includes unlimited repositories.
- "included_storage_bytes": 0, # Optional. The amount of
- storage included in the subscription tier in bytes.
- "monthly_price_in_cents": 0, # Optional. The monthly cost of
- the subscription tier in cents.
- "name": "str", # Optional. The name of the subscription
- tier.
- "slug": "str", # Optional. The slug identifier of the
- subscription tier.
- "storage_overage_price_in_cents": 0 # Optional. The price
- paid in cents per GiB for additional storage beyond what is included in
- the subscription plan.
- },
- "updated_at": "2020-02-20 00:00:00" # Optional. The time at which
- the subscription was last updated.
- }
+ "meta": {
+ "total": 0 # Optional. Number of objects returned by the request.
+ },
+ "links": {
+ "pages": {}
+ },
+ "manifests": [
+ {
+ "blobs": [
+ {
+ "compressed_size_bytes": 0, # Optional. The
+ compressed size of the blob in bytes.
+ "digest": "str" # Optional. The digest of
+ the blob.
+ }
+ ],
+ "compressed_size_bytes": 0, # Optional. The compressed size
+ of the manifest in bytes.
+ "digest": "str", # Optional. The manifest digest.
+ "registry_name": "str", # Optional. The name of the
+ container registry.
+ "repository": "str", # Optional. The name of the repository.
+ "size_bytes": 0, # Optional. The uncompressed size of the
+ manifest in bytes (this size is calculated asynchronously so it may not
+ be immediately available).
+ "tags": [
+ "str" # Optional. All tags associated with this
+ manifest.
+ ],
+ "updated_at": "2020-02-20 00:00:00" # Optional. The time the
+ manifest was last updated.
+ }
+ ]
}
- # response body for status code(s): 412
+ # response body for status code(s): 404
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
returned. For example, the ID for a response returning a 404 status code would
@@ -170030,29 +180237,16 @@ async def update_subscription(
}
error_map.update(kwargs.pop("error_map", {}) or {})
- _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _headers = kwargs.pop("headers", {}) or {}
_params = kwargs.pop("params", {}) or {}
- content_type: Optional[str] = kwargs.pop(
- "content_type", _headers.pop("Content-Type", None)
- )
cls: ClsType[JSON] = kwargs.pop("cls", None)
- content_type = content_type or "application/json"
- _json = None
- _content = None
- if isinstance(body, (IOBase, bytes)):
- _content = body
- else:
- if body is not None:
- _json = body
- else:
- _json = None
-
- _request = build_registry_update_subscription_request(
- content_type=content_type,
- json=_json,
- content=_content,
+ _request = build_registry_list_repository_manifests_request(
+ registry_name=registry_name,
+ repository_name=repository_name,
+ per_page=per_page,
+ page=page,
headers=_headers,
params=_params,
)
@@ -170067,7 +180261,7 @@ async def update_subscription(
response = pipeline_response.http_response
- if response.status_code not in [200, 412]:
+ if response.status_code not in [200, 404]:
if _stream:
await response.read() # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
@@ -170090,7 +180284,7 @@ async def update_subscription(
else:
deserialized = None
- if response.status_code == 412:
+ if response.status_code == 404:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
)
@@ -170112,60 +180306,53 @@ async def update_subscription(
return cast(JSON, deserialized) # type: ignore
@distributed_trace_async
- async def get_docker_credentials(
- self, *, expiry_seconds: int = 0, read_write: bool = False, **kwargs: Any
- ) -> JSON:
- """Get Docker Credentials for Container Registry.
+ async def delete_repository_manifest(
+ self,
+ registry_name: str,
+ repository_name: str,
+ manifest_digest: str,
+ **kwargs: Any
+ ) -> Optional[JSON]:
+ # pylint: disable=line-too-long
+ """Delete Container Registry Repository Manifest.
**Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.**
- In order to access your container registry with the Docker client or from a
- Kubernetes cluster, you will need to configure authentication. The necessary
- JSON configuration can be retrieved by sending a GET request to
- ``/v2/registry/docker-credentials``.
-
- The response will be in the format of a Docker ``config.json`` file. To use the
- config in your Kubernetes cluster, create a Secret with:
-
- .. code-block::
-
- kubectl create secret generic docr \\
- --from-file=.dockerconfigjson=config.json \\
- --type=kubernetes.io/dockerconfigjson
-
+ To delete a container repository manifest by digest, send a DELETE request to
+ ``/v2/registry/$REGISTRY_NAME/repositories/$REPOSITORY_NAME/digests/$MANIFEST_DIGEST``.
- By default, the returned credentials have read-only access to your registry
- and cannot be used to push images. This is appropriate for most Kubernetes
- clusters. To retrieve read/write credentials, suitable for use with the Docker
- client or in a CI system, read_write may be provided as query parameter. For
- example: ``/v2/registry/docker-credentials?read_write=true``
+ Note that if your repository name contains ``/`` characters, it must be
+ URL-encoded in the request URL. For example, to delete
+ ``registry.digitalocean.com/example/my/repo@sha256:abcd``\\ , the path would be
+ ``/v2/registry/example/repositories/my%2Frepo/digests/sha256:abcd``.
- By default, the returned credentials will not expire. To retrieve credentials
- with an expiry set, expiry_seconds may be provided as a query parameter. For
- example: ``/v2/registry/docker-credentials?expiry_seconds=3600`` will return
- credentials that expire after one hour.
+ A successful request will receive a 204 status code with no body in response.
+ This indicates that the request was processed successfully.
- :keyword expiry_seconds: The duration in seconds that the returned registry credentials will be
- valid. If not set or 0, the credentials will not expire. Default value is 0.
- :paramtype expiry_seconds: int
- :keyword read_write: By default, the registry credentials allow for read-only access. Set this
- query parameter to ``true`` to obtain read-write credentials. Default value is False.
- :paramtype read_write: bool
- :return: JSON object
- :rtype: JSON
+ :param registry_name: The name of a container registry. Required.
+ :type registry_name: str
+ :param repository_name: The name of a container registry repository. If the name contains ``/``
+ characters, they must be URL-encoded, e.g. ``%2F``. Required.
+ :type repository_name: str
+ :param manifest_digest: The manifest digest of a container registry repository tag. Required.
+ :type manifest_digest: str
+ :return: JSON object or None
+ :rtype: JSON or None
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
- # response body for status code(s): 200
+ # response body for status code(s): 404
response == {
- "auths": {
- "registry.digitalocean.com": {
- "auth": "str" # Optional. A base64 encoded string containing
- credentials for the container registry.
- }
- }
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
}
"""
error_map: MutableMapping[int, Type[HttpResponseError]] = {
@@ -170184,11 +180371,12 @@ async def get_docker_credentials(
_headers = kwargs.pop("headers", {}) or {}
_params = kwargs.pop("params", {}) or {}
- cls: ClsType[JSON] = kwargs.pop("cls", None)
+ cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None)
- _request = build_registry_get_docker_credentials_request(
- expiry_seconds=expiry_seconds,
- read_write=read_write,
+ _request = build_registry_delete_repository_manifest_request(
+ registry_name=registry_name,
+ repository_name=repository_name,
+ manifest_digest=manifest_digest,
headers=_headers,
params=_params,
)
@@ -170203,56 +180391,93 @@ async def get_docker_credentials(
response = pipeline_response.http_response
- if response.status_code not in [200]:
+ if response.status_code not in [204, 404]:
if _stream:
await response.read() # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
raise HttpResponseError(response=response)
+ deserialized = None
response_headers = {}
- response_headers["ratelimit-limit"] = self._deserialize(
- "int", response.headers.get("ratelimit-limit")
- )
- response_headers["ratelimit-remaining"] = self._deserialize(
- "int", response.headers.get("ratelimit-remaining")
- )
- response_headers["ratelimit-reset"] = self._deserialize(
- "int", response.headers.get("ratelimit-reset")
- )
+ if response.status_code == 204:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
- if response.content:
- deserialized = response.json()
- else:
- deserialized = None
+ if response.status_code == 404:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
if cls:
- return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- return cast(JSON, deserialized) # type: ignore
+ return deserialized # type: ignore
@overload
- async def validate_name(
- self, body: JSON, *, content_type: str = "application/json", **kwargs: Any
- ) -> Optional[JSON]:
+ async def run_garbage_collection(
+ self,
+ registry_name: str,
+ body: Optional[JSON] = None,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> JSON:
# pylint: disable=line-too-long
- """Validate a Container Registry Name.
+ """Start Garbage Collection.
**Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.**
- To validate that a container registry name is available for use, send a POST
- request to ``/v2/registry/validate-name``.
+ Garbage collection enables users to clear out unreferenced blobs (layer &
+ manifest data) after deleting one or more manifests from a repository. If
+ there are no unreferenced blobs resulting from the deletion of one or more
+ manifests, garbage collection is effectively a noop.
+ `See here for more information
+ `_
+ about how and why you should clean up your container registry periodically.
- If the name is both formatted correctly and available, the response code will
- be 204 and contain no body. If the name is already in use, the response will
- be a 409 Conflict.
+ To request a garbage collection run on your registry, send a POST request to
+ ``/v2/registry/$REGISTRY_NAME/garbage-collection``. This will initiate the
+ following sequence of events on your registry.
- :param body: Required.
+
+ * Set the registry to read-only mode, meaning no further write-scoped
+ JWTs will be issued to registry clients. Existing write-scoped JWTs will
+ continue to work until they expire which can take up to 15 minutes.
+ * Wait until all existing write-scoped JWTs have expired.
+ * Scan all registry manifests to determine which blobs are unreferenced.
+ * Delete all unreferenced blobs from the registry.
+ * Record the number of blobs deleted and bytes freed, mark the garbage
+ collection status as ``success``.
+ * Remove the read-only mode restriction from the registry, meaning write-scoped
+ JWTs will once again be issued to registry clients.
+
+ :param registry_name: The name of a container registry. Required.
+ :type registry_name: str
+ :param body: Default value is None.
:type body: JSON
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :return: JSON object or None
- :rtype: JSON or None
+ :return: JSON object
+ :rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
Example:
@@ -170260,12 +180485,33 @@ async def validate_name(
# JSON input template you can fill out and use as your body input.
body = {
- "name": "str" # A globally unique name for the container registry. Must be
- lowercase and be composed only of numbers, letters and ``-``"" , up to a limit of
- 63 characters. Required.
+ "type": "str" # Optional. Type of the garbage collection to run against this
+ registry. Known values are: "untagged manifests only", "unreferenced blobs only",
+ and "untagged manifests and unreferenced blobs".
}
- # response body for status code(s): 409
+ # response body for status code(s): 201
+ response == {
+ "garbage_collection": {
+ "blobs_deleted": 0, # Optional. The number of blobs deleted as a
+ result of this garbage collection.
+ "created_at": "2020-02-20 00:00:00", # Optional. The time the
+ garbage collection was created.
+ "freed_bytes": 0, # Optional. The number of bytes freed as a result
+ of this garbage collection.
+ "registry_name": "str", # Optional. The name of the container
+ registry.
+ "status": "str", # Optional. The current status of this garbage
+ collection. Known values are: "requested", "waiting for write JWTs to
+ expire", "scanning manifests", "deleting unreferenced blobs", "cancelling",
+ "failed", "succeeded", and "cancelled".
+ "updated_at": "2020-02-20 00:00:00", # Optional. The time the
+ garbage collection was last updated.
+ "uuid": "str" # Optional. A string specifying the UUID of the
+ garbage collection.
+ }
+ }
+ # response body for status code(s): 404
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
returned. For example, the ID for a response returning a 404 status code would
@@ -170279,34 +180525,79 @@ async def validate_name(
"""
@overload
- async def validate_name(
- self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
- ) -> Optional[JSON]:
+ async def run_garbage_collection(
+ self,
+ registry_name: str,
+ body: Optional[IO[bytes]] = None,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> JSON:
# pylint: disable=line-too-long
- """Validate a Container Registry Name.
+ """Start Garbage Collection.
**Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.**
- To validate that a container registry name is available for use, send a POST
- request to ``/v2/registry/validate-name``.
+ Garbage collection enables users to clear out unreferenced blobs (layer &
+ manifest data) after deleting one or more manifests from a repository. If
+ there are no unreferenced blobs resulting from the deletion of one or more
+ manifests, garbage collection is effectively a noop.
+ `See here for more information
+ `_
+ about how and why you should clean up your container registry periodically.
- If the name is both formatted correctly and available, the response code will
- be 204 and contain no body. If the name is already in use, the response will
- be a 409 Conflict.
+ To request a garbage collection run on your registry, send a POST request to
+ ``/v2/registry/$REGISTRY_NAME/garbage-collection``. This will initiate the
+ following sequence of events on your registry.
- :param body: Required.
+
+ * Set the registry to read-only mode, meaning no further write-scoped
+ JWTs will be issued to registry clients. Existing write-scoped JWTs will
+ continue to work until they expire which can take up to 15 minutes.
+ * Wait until all existing write-scoped JWTs have expired.
+ * Scan all registry manifests to determine which blobs are unreferenced.
+ * Delete all unreferenced blobs from the registry.
+ * Record the number of blobs deleted and bytes freed, mark the garbage
+ collection status as ``success``.
+ * Remove the read-only mode restriction from the registry, meaning write-scoped
+ JWTs will once again be issued to registry clients.
+
+ :param registry_name: The name of a container registry. Required.
+ :type registry_name: str
+ :param body: Default value is None.
:type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :return: JSON object or None
- :rtype: JSON or None
+ :return: JSON object
+ :rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
- # response body for status code(s): 409
+ # response body for status code(s): 201
+ response == {
+ "garbage_collection": {
+ "blobs_deleted": 0, # Optional. The number of blobs deleted as a
+ result of this garbage collection.
+ "created_at": "2020-02-20 00:00:00", # Optional. The time the
+ garbage collection was created.
+ "freed_bytes": 0, # Optional. The number of bytes freed as a result
+ of this garbage collection.
+ "registry_name": "str", # Optional. The name of the container
+ registry.
+ "status": "str", # Optional. The current status of this garbage
+ collection. Known values are: "requested", "waiting for write JWTs to
+ expire", "scanning manifests", "deleting unreferenced blobs", "cancelling",
+ "failed", "succeeded", and "cancelled".
+ "updated_at": "2020-02-20 00:00:00", # Optional. The time the
+ garbage collection was last updated.
+ "uuid": "str" # Optional. A string specifying the UUID of the
+ garbage collection.
+ }
+ }
+ # response body for status code(s): 404
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
returned. For example, the ID for a response returning a 404 status code would
@@ -170320,25 +180611,47 @@ async def validate_name(
"""
@distributed_trace_async
- async def validate_name(
- self, body: Union[JSON, IO[bytes]], **kwargs: Any
- ) -> Optional[JSON]:
+ async def run_garbage_collection(
+ self,
+ registry_name: str,
+ body: Optional[Union[JSON, IO[bytes]]] = None,
+ **kwargs: Any
+ ) -> JSON:
# pylint: disable=line-too-long
- """Validate a Container Registry Name.
+ """Start Garbage Collection.
**Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.**
- To validate that a container registry name is available for use, send a POST
- request to ``/v2/registry/validate-name``.
+ Garbage collection enables users to clear out unreferenced blobs (layer &
+ manifest data) after deleting one or more manifests from a repository. If
+ there are no unreferenced blobs resulting from the deletion of one or more
+ manifests, garbage collection is effectively a noop.
+ `See here for more information
+ `_
+ about how and why you should clean up your container registry periodically.
- If the name is both formatted correctly and available, the response code will
- be 204 and contain no body. If the name is already in use, the response will
- be a 409 Conflict.
+ To request a garbage collection run on your registry, send a POST request to
+ ``/v2/registry/$REGISTRY_NAME/garbage-collection``. This will initiate the
+ following sequence of events on your registry.
- :param body: Is either a JSON type or a IO[bytes] type. Required.
+
+ * Set the registry to read-only mode, meaning no further write-scoped
+ JWTs will be issued to registry clients. Existing write-scoped JWTs will
+ continue to work until they expire which can take up to 15 minutes.
+ * Wait until all existing write-scoped JWTs have expired.
+ * Scan all registry manifests to determine which blobs are unreferenced.
+ * Delete all unreferenced blobs from the registry.
+ * Record the number of blobs deleted and bytes freed, mark the garbage
+ collection status as ``success``.
+ * Remove the read-only mode restriction from the registry, meaning write-scoped
+ JWTs will once again be issued to registry clients.
+
+ :param registry_name: The name of a container registry. Required.
+ :type registry_name: str
+ :param body: Is either a JSON type or a IO[bytes] type. Default value is None.
:type body: JSON or IO[bytes]
- :return: JSON object or None
- :rtype: JSON or None
+ :return: JSON object
+ :rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
Example:
@@ -170346,12 +180659,33 @@ async def validate_name(
# JSON input template you can fill out and use as your body input.
body = {
- "name": "str" # A globally unique name for the container registry. Must be
- lowercase and be composed only of numbers, letters and ``-``"" , up to a limit of
- 63 characters. Required.
+ "type": "str" # Optional. Type of the garbage collection to run against this
+ registry. Known values are: "untagged manifests only", "unreferenced blobs only",
+ and "untagged manifests and unreferenced blobs".
}
- # response body for status code(s): 409
+ # response body for status code(s): 201
+ response == {
+ "garbage_collection": {
+ "blobs_deleted": 0, # Optional. The number of blobs deleted as a
+ result of this garbage collection.
+ "created_at": "2020-02-20 00:00:00", # Optional. The time the
+ garbage collection was created.
+ "freed_bytes": 0, # Optional. The number of bytes freed as a result
+ of this garbage collection.
+ "registry_name": "str", # Optional. The name of the container
+ registry.
+ "status": "str", # Optional. The current status of this garbage
+ collection. Known values are: "requested", "waiting for write JWTs to
+ expire", "scanning manifests", "deleting unreferenced blobs", "cancelling",
+ "failed", "succeeded", and "cancelled".
+ "updated_at": "2020-02-20 00:00:00", # Optional. The time the
+ garbage collection was last updated.
+ "uuid": "str" # Optional. A string specifying the UUID of the
+ garbage collection.
+ }
+ }
+ # response body for status code(s): 404
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
returned. For example, the ID for a response returning a 404 status code would
@@ -170382,7 +180716,7 @@ async def validate_name(
content_type: Optional[str] = kwargs.pop(
"content_type", _headers.pop("Content-Type", None)
)
- cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None)
+ cls: ClsType[JSON] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -170390,9 +180724,13 @@ async def validate_name(
if isinstance(body, (IOBase, bytes)):
_content = body
else:
- _json = body
+ if body is not None:
+ _json = body
+ else:
+ _json = None
- _request = build_registry_validate_name_request(
+ _request = build_registry_run_garbage_collection_request(
+ registry_name=registry_name,
content_type=content_type,
json=_json,
content=_content,
@@ -170410,15 +180748,14 @@ async def validate_name(
response = pipeline_response.http_response
- if response.status_code not in [204, 409]:
+ if response.status_code not in [201, 404]:
if _stream:
await response.read() # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
raise HttpResponseError(response=response)
- deserialized = None
response_headers = {}
- if response.status_code == 204:
+ if response.status_code == 201:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
)
@@ -170429,7 +180766,12 @@ async def validate_name(
"int", response.headers.get("ratelimit-reset")
)
- if response.status_code == 409:
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if response.status_code == 404:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
)
@@ -170446,31 +180788,22 @@ async def validate_name(
deserialized = None
if cls:
- return cls(pipeline_response, deserialized, response_headers) # type: ignore
+ return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
- return deserialized # type: ignore
+ return cast(JSON, deserialized) # type: ignore
@distributed_trace_async
- async def list_repositories(
- self, registry_name: str, *, per_page: int = 20, page: int = 1, **kwargs: Any
- ) -> JSON:
+ async def get_garbage_collection(self, registry_name: str, **kwargs: Any) -> JSON:
# pylint: disable=line-too-long
- """List All Container Registry Repositories.
+ """Get Active Garbage Collection.
**Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.**
- This endpoint has been deprecated in favor of the *List All Container Registry Repositories
- [V2]* endpoint.
-
- To list all repositories in your container registry, send a GET
- request to ``/v2/registry/$REGISTRY_NAME/repositories``.
+ To get information about the currently-active garbage collection
+ for a registry, send a GET request to ``/v2/registry/$REGISTRY_NAME/garbage-collection``.
:param registry_name: The name of a container registry. Required.
:type registry_name: str
- :keyword per_page: Number of items returned per page. Default value is 20.
- :paramtype per_page: int
- :keyword page: Which 'page' of paginated results to return. Default value is 1.
- :paramtype page: int
:return: JSON object
:rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
@@ -170480,37 +180813,24 @@ async def list_repositories(
# response body for status code(s): 200
response == {
- "meta": {
- "total": 0 # Optional. Number of objects returned by the request.
- },
- "links": {
- "pages": {}
- },
- "repositories": [
- {
- "latest_tag": {
- "compressed_size_bytes": 0, # Optional. The
- compressed size of the tag in bytes.
- "manifest_digest": "str", # Optional. The digest of
- the manifest associated with the tag.
- "registry_name": "str", # Optional. The name of the
- container registry.
- "repository": "str", # Optional. The name of the
- repository.
- "size_bytes": 0, # Optional. The uncompressed size
- of the tag in bytes (this size is calculated asynchronously so it may
- not be immediately available).
- "tag": "str", # Optional. The name of the tag.
- "updated_at": "2020-02-20 00:00:00" # Optional. The
- time the tag was last updated.
- },
- "name": "str", # Optional. The name of the repository.
- "registry_name": "str", # Optional. The name of the
- container registry.
- "tag_count": 0 # Optional. The number of tags in the
- repository.
- }
- ]
+ "garbage_collection": {
+ "blobs_deleted": 0, # Optional. The number of blobs deleted as a
+ result of this garbage collection.
+ "created_at": "2020-02-20 00:00:00", # Optional. The time the
+ garbage collection was created.
+ "freed_bytes": 0, # Optional. The number of bytes freed as a result
+ of this garbage collection.
+ "registry_name": "str", # Optional. The name of the container
+ registry.
+ "status": "str", # Optional. The current status of this garbage
+ collection. Known values are: "requested", "waiting for write JWTs to
+ expire", "scanning manifests", "deleting unreferenced blobs", "cancelling",
+ "failed", "succeeded", and "cancelled".
+ "updated_at": "2020-02-20 00:00:00", # Optional. The time the
+ garbage collection was last updated.
+ "uuid": "str" # Optional. A string specifying the UUID of the
+ garbage collection.
+ }
}
# response body for status code(s): 404
response == {
@@ -170542,10 +180862,8 @@ async def list_repositories(
cls: ClsType[JSON] = kwargs.pop("cls", None)
- _request = build_registry_list_repositories_request(
+ _request = build_registry_get_garbage_collection_request(
registry_name=registry_name,
- per_page=per_page,
- page=page,
headers=_headers,
params=_params,
)
@@ -170605,33 +180923,23 @@ async def list_repositories(
return cast(JSON, deserialized) # type: ignore
@distributed_trace_async
- async def list_repositories_v2(
- self,
- registry_name: str,
- *,
- per_page: int = 20,
- page: int = 1,
- page_token: Optional[str] = None,
- **kwargs: Any
+ async def list_garbage_collections(
+ self, registry_name: str, *, per_page: int = 20, page: int = 1, **kwargs: Any
) -> JSON:
# pylint: disable=line-too-long
- """List All Container Registry Repositories (V2).
+ """List Garbage Collections.
**Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.**
- To list all repositories in your container registry, send a GET
- request to ``/v2/registry/$REGISTRY_NAME/repositoriesV2``.
+ To get information about past garbage collections for a registry,
+ send a GET request to ``/v2/registry/$REGISTRY_NAME/garbage-collections``.
:param registry_name: The name of a container registry. Required.
:type registry_name: str
:keyword per_page: Number of items returned per page. Default value is 20.
:paramtype per_page: int
- :keyword page: Which 'page' of paginated results to return. Ignored when 'page_token' is
- provided. Default value is 1.
+ :keyword page: Which 'page' of paginated results to return. Default value is 1.
:paramtype page: int
- :keyword page_token: Token to retrieve of the next or previous set of results more quickly than
- using 'page'. Default value is None.
- :paramtype page_token: str
:return: JSON object
:rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
@@ -170641,51 +180949,28 @@ async def list_repositories_v2(
# response body for status code(s): 200
response == {
- "meta": {
- "total": 0 # Optional. Number of objects returned by the request.
- },
- "links": {
- "pages": {}
- },
- "repositories": [
+ "garbage_collections": [
{
- "latest_manifest": {
- "blobs": [
- {
- "compressed_size_bytes": 0, #
- Optional. The compressed size of the blob in bytes.
- "digest": "str" # Optional. The
- digest of the blob.
- }
- ],
- "compressed_size_bytes": 0, # Optional. The
- compressed size of the manifest in bytes.
- "digest": "str", # Optional. The manifest digest.
- "registry_name": "str", # Optional. The name of the
- container registry.
- "repository": "str", # Optional. The name of the
- repository.
- "size_bytes": 0, # Optional. The uncompressed size
- of the manifest in bytes (this size is calculated asynchronously so
- it may not be immediately available).
- "tags": [
- "str" # Optional. All tags associated with
- this manifest.
- ],
- "updated_at": "2020-02-20 00:00:00" # Optional. The
- time the manifest was last updated.
- },
- "manifest_count": 0, # Optional. The number of manifests in
- the repository.
- "name": "str", # Optional. The name of the repository.
+ "blobs_deleted": 0, # Optional. The number of blobs deleted
+ as a result of this garbage collection.
+ "created_at": "2020-02-20 00:00:00", # Optional. The time
+ the garbage collection was created.
+ "freed_bytes": 0, # Optional. The number of bytes freed as a
+ result of this garbage collection.
"registry_name": "str", # Optional. The name of the
container registry.
- "tag_count": 0 # Optional. The number of tags in the
- repository.
+ "status": "str", # Optional. The current status of this
+ garbage collection. Known values are: "requested", "waiting for write
+ JWTs to expire", "scanning manifests", "deleting unreferenced blobs",
+ "cancelling", "failed", "succeeded", and "cancelled".
+ "updated_at": "2020-02-20 00:00:00", # Optional. The time
+ the garbage collection was last updated.
+ "uuid": "str" # Optional. A string specifying the UUID of
+ the garbage collection.
}
]
}
- # response body for status code(s): 400, 404
+ # response body for status code(s): 404
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
returned. For example, the ID for a response returning a 404 status code would
@@ -170715,11 +181000,10 @@ async def list_repositories_v2(
cls: ClsType[JSON] = kwargs.pop("cls", None)
- _request = build_registry_list_repositories_v2_request(
+ _request = build_registry_list_garbage_collections_request(
registry_name=registry_name,
per_page=per_page,
page=page,
- page_token=page_token,
headers=_headers,
params=_params,
)
@@ -170734,7 +181018,7 @@ async def list_repositories_v2(
response = pipeline_response.http_response
- if response.status_code not in [200, 400, 404]:
+ if response.status_code not in [200, 404]:
if _stream:
await response.read() # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
@@ -170757,22 +181041,6 @@ async def list_repositories_v2(
else:
deserialized = None
- if response.status_code == 400:
- response_headers["ratelimit-limit"] = self._deserialize(
- "int", response.headers.get("ratelimit-limit")
- )
- response_headers["ratelimit-remaining"] = self._deserialize(
- "int", response.headers.get("ratelimit-remaining")
- )
- response_headers["ratelimit-reset"] = self._deserialize(
- "int", response.headers.get("ratelimit-reset")
- )
-
- if response.content:
- deserialized = response.json()
- else:
- deserialized = None
-
if response.status_code == 404:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
@@ -170794,38 +181062,34 @@ async def list_repositories_v2(
return cast(JSON, deserialized) # type: ignore
- @distributed_trace_async
- async def list_repository_tags(
+ @overload
+ async def update_garbage_collection(
self,
registry_name: str,
- repository_name: str,
+ garbage_collection_uuid: str,
+ body: JSON,
*,
- per_page: int = 20,
- page: int = 1,
+ content_type: str = "application/json",
**kwargs: Any
) -> JSON:
# pylint: disable=line-too-long
- """List All Container Registry Repository Tags.
+ """Update Garbage Collection.
**Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.**
- To list all tags in your container registry repository, send a GET
- request to ``/v2/registry/$REGISTRY_NAME/repositories/$REPOSITORY_NAME/tags``.
-
- Note that if your repository name contains ``/`` characters, it must be
- URL-encoded in the request URL. For example, to list tags for
- ``registry.digitalocean.com/example/my/repo``\\ , the path would be
- ``/v2/registry/example/repositories/my%2Frepo/tags``.
+ To cancel the currently-active garbage collection for a registry,
+ send a PUT request to ``/v2/registry/$REGISTRY_NAME/garbage-collection/$GC_UUID``
+ and specify one or more of the attributes below.
:param registry_name: The name of a container registry. Required.
:type registry_name: str
- :param repository_name: The name of a container registry repository. If the name contains ``/``
- characters, they must be URL-encoded, e.g. ``%2F``. Required.
- :type repository_name: str
- :keyword per_page: Number of items returned per page. Default value is 20.
- :paramtype per_page: int
- :keyword page: Which 'page' of paginated results to return. Default value is 1.
- :paramtype page: int
+ :param garbage_collection_uuid: The UUID of a garbage collection run. Required.
+ :type garbage_collection_uuid: str
+ :param body: Required.
+ :type body: JSON
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
:return: JSON object
:rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
@@ -170833,31 +181097,32 @@ async def list_repository_tags(
Example:
.. code-block:: python
+ # JSON input template you can fill out and use as your body input.
+ body = {
+ "cancel": bool # Optional. A boolean value indicating that the garbage
+ collection should be cancelled.
+ }
+
# response body for status code(s): 200
response == {
- "meta": {
- "total": 0 # Optional. Number of objects returned by the request.
- },
- "links": {
- "pages": {}
- },
- "tags": [
- {
- "compressed_size_bytes": 0, # Optional. The compressed size
- of the tag in bytes.
- "manifest_digest": "str", # Optional. The digest of the
- manifest associated with the tag.
- "registry_name": "str", # Optional. The name of the
- container registry.
- "repository": "str", # Optional. The name of the repository.
- "size_bytes": 0, # Optional. The uncompressed size of the
- tag in bytes (this size is calculated asynchronously so it may not be
- immediately available).
- "tag": "str", # Optional. The name of the tag.
- "updated_at": "2020-02-20 00:00:00" # Optional. The time the
- tag was last updated.
- }
- ]
+ "garbage_collection": {
+ "blobs_deleted": 0, # Optional. The number of blobs deleted as a
+ result of this garbage collection.
+ "created_at": "2020-02-20 00:00:00", # Optional. The time the
+ garbage collection was created.
+ "freed_bytes": 0, # Optional. The number of bytes freed as a result
+ of this garbage collection.
+ "registry_name": "str", # Optional. The name of the container
+ registry.
+ "status": "str", # Optional. The current status of this garbage
+ collection. Known values are: "requested", "waiting for write JWTs to
+ expire", "scanning manifests", "deleting unreferenced blobs", "cancelling",
+ "failed", "succeeded", and "cancelled".
+ "updated_at": "2020-02-20 00:00:00", # Optional. The time the
+ garbage collection was last updated.
+ "uuid": "str" # Optional. A string specifying the UUID of the
+ garbage collection.
+ }
}
# response body for status code(s): 404
response == {
@@ -170871,125 +181136,133 @@ async def list_repository_tags(
tickets to help identify the issue.
}
"""
- error_map: MutableMapping[int, Type[HttpResponseError]] = {
- 404: ResourceNotFoundError,
- 409: ResourceExistsError,
- 304: ResourceNotModifiedError,
- 401: cast(
- Type[HttpResponseError],
- lambda response: ClientAuthenticationError(response=response),
- ),
- 429: HttpResponseError,
- 500: HttpResponseError,
- }
- error_map.update(kwargs.pop("error_map", {}) or {})
-
- _headers = kwargs.pop("headers", {}) or {}
- _params = kwargs.pop("params", {}) or {}
-
- cls: ClsType[JSON] = kwargs.pop("cls", None)
-
- _request = build_registry_list_repository_tags_request(
- registry_name=registry_name,
- repository_name=repository_name,
- per_page=per_page,
- page=page,
- headers=_headers,
- params=_params,
- )
- _request.url = self._client.format_url(_request.url)
-
- _stream = False
- pipeline_response: PipelineResponse = (
- await self._client._pipeline.run( # pylint: disable=protected-access
- _request, stream=_stream, **kwargs
- )
- )
-
- response = pipeline_response.http_response
-
- if response.status_code not in [200, 404]:
- if _stream:
- await response.read() # Load the body in memory and close the socket
- map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
- raise HttpResponseError(response=response)
- response_headers = {}
- if response.status_code == 200:
- response_headers["ratelimit-limit"] = self._deserialize(
- "int", response.headers.get("ratelimit-limit")
- )
- response_headers["ratelimit-remaining"] = self._deserialize(
- "int", response.headers.get("ratelimit-remaining")
- )
- response_headers["ratelimit-reset"] = self._deserialize(
- "int", response.headers.get("ratelimit-reset")
- )
+ @overload
+ async def update_garbage_collection(
+ self,
+ registry_name: str,
+ garbage_collection_uuid: str,
+ body: IO[bytes],
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> JSON:
+ # pylint: disable=line-too-long
+ """Update Garbage Collection.
- if response.content:
- deserialized = response.json()
- else:
- deserialized = None
+ **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.**
- if response.status_code == 404:
- response_headers["ratelimit-limit"] = self._deserialize(
- "int", response.headers.get("ratelimit-limit")
- )
- response_headers["ratelimit-remaining"] = self._deserialize(
- "int", response.headers.get("ratelimit-remaining")
- )
- response_headers["ratelimit-reset"] = self._deserialize(
- "int", response.headers.get("ratelimit-reset")
- )
+ To cancel the currently-active garbage collection for a registry,
+ send a PUT request to ``/v2/registry/$REGISTRY_NAME/garbage-collection/$GC_UUID``
+ and specify one or more of the attributes below.
- if response.content:
- deserialized = response.json()
- else:
- deserialized = None
+ :param registry_name: The name of a container registry. Required.
+ :type registry_name: str
+ :param garbage_collection_uuid: The UUID of a garbage collection run. Required.
+ :type garbage_collection_uuid: str
+ :param body: Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: JSON object
+ :rtype: JSON
+ :raises ~azure.core.exceptions.HttpResponseError:
- if cls:
- return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
+ Example:
+ .. code-block:: python
- return cast(JSON, deserialized) # type: ignore
+ # response body for status code(s): 200
+ response == {
+ "garbage_collection": {
+ "blobs_deleted": 0, # Optional. The number of blobs deleted as a
+ result of this garbage collection.
+ "created_at": "2020-02-20 00:00:00", # Optional. The time the
+ garbage collection was created.
+ "freed_bytes": 0, # Optional. The number of bytes freed as a result
+ of this garbage collection.
+ "registry_name": "str", # Optional. The name of the container
+ registry.
+ "status": "str", # Optional. The current status of this garbage
+ collection. Known values are: "requested", "waiting for write JWTs to
+ expire", "scanning manifests", "deleting unreferenced blobs", "cancelling",
+ "failed", "succeeded", and "cancelled".
+ "updated_at": "2020-02-20 00:00:00", # Optional. The time the
+ garbage collection was last updated.
+ "uuid": "str" # Optional. A string specifying the UUID of the
+ garbage collection.
+ }
+ }
+ # response body for status code(s): 404
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
+ """
@distributed_trace_async
- async def delete_repository_tag(
+ async def update_garbage_collection(
self,
registry_name: str,
- repository_name: str,
- repository_tag: str,
+ garbage_collection_uuid: str,
+ body: Union[JSON, IO[bytes]],
**kwargs: Any
- ) -> Optional[JSON]:
+ ) -> JSON:
# pylint: disable=line-too-long
- """Delete Container Registry Repository Tag.
+ """Update Garbage Collection.
**Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.**
- To delete a container repository tag, send a DELETE request to
- ``/v2/registry/$REGISTRY_NAME/repositories/$REPOSITORY_NAME/tags/$TAG``.
-
- Note that if your repository name contains ``/`` characters, it must be
- URL-encoded in the request URL. For example, to delete
- ``registry.digitalocean.com/example/my/repo:mytag``\\ , the path would be
- ``/v2/registry/example/repositories/my%2Frepo/tags/mytag``.
-
- A successful request will receive a 204 status code with no body in response.
- This indicates that the request was processed successfully.
+ To cancel the currently-active garbage collection for a registry,
+ send a PUT request to ``/v2/registry/$REGISTRY_NAME/garbage-collection/$GC_UUID``
+ and specify one or more of the attributes below.
:param registry_name: The name of a container registry. Required.
:type registry_name: str
- :param repository_name: The name of a container registry repository. If the name contains ``/``
- characters, they must be URL-encoded, e.g. ``%2F``. Required.
- :type repository_name: str
- :param repository_tag: The name of a container registry repository tag. Required.
- :type repository_tag: str
- :return: JSON object or None
- :rtype: JSON or None
+ :param garbage_collection_uuid: The UUID of a garbage collection run. Required.
+ :type garbage_collection_uuid: str
+ :param body: Is either a JSON type or a IO[bytes] type. Required.
+ :type body: JSON or IO[bytes]
+ :return: JSON object
+ :rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
+ # JSON input template you can fill out and use as your body input.
+ body = {
+ "cancel": bool # Optional. A boolean value indicating that the garbage
+ collection should be cancelled.
+ }
+
+ # response body for status code(s): 200
+ response == {
+ "garbage_collection": {
+ "blobs_deleted": 0, # Optional. The number of blobs deleted as a
+ result of this garbage collection.
+ "created_at": "2020-02-20 00:00:00", # Optional. The time the
+ garbage collection was created.
+ "freed_bytes": 0, # Optional. The number of bytes freed as a result
+ of this garbage collection.
+ "registry_name": "str", # Optional. The name of the container
+ registry.
+ "status": "str", # Optional. The current status of this garbage
+ collection. Known values are: "requested", "waiting for write JWTs to
+ expire", "scanning manifests", "deleting unreferenced blobs", "cancelling",
+ "failed", "succeeded", and "cancelled".
+ "updated_at": "2020-02-20 00:00:00", # Optional. The time the
+ garbage collection was last updated.
+ "uuid": "str" # Optional. A string specifying the UUID of the
+ garbage collection.
+ }
+ }
# response body for status code(s): 404
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
@@ -171015,15 +181288,28 @@ async def delete_repository_tag(
}
error_map.update(kwargs.pop("error_map", {}) or {})
- _headers = kwargs.pop("headers", {}) or {}
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = kwargs.pop("params", {}) or {}
- cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None)
+ content_type: Optional[str] = kwargs.pop(
+ "content_type", _headers.pop("Content-Type", None)
+ )
+ cls: ClsType[JSON] = kwargs.pop("cls", None)
- _request = build_registry_delete_repository_tag_request(
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = body
+
+ _request = build_registry_update_garbage_collection_request(
registry_name=registry_name,
- repository_name=repository_name,
- repository_tag=repository_tag,
+ garbage_collection_uuid=garbage_collection_uuid,
+ content_type=content_type,
+ json=_json,
+ content=_content,
headers=_headers,
params=_params,
)
@@ -171038,15 +181324,14 @@ async def delete_repository_tag(
response = pipeline_response.http_response
- if response.status_code not in [204, 404]:
+ if response.status_code not in [200, 404]:
if _stream:
await response.read() # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
raise HttpResponseError(response=response)
- deserialized = None
response_headers = {}
- if response.status_code == 204:
+ if response.status_code == 200:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
)
@@ -171057,6 +181342,11 @@ async def delete_repository_tag(
"int", response.headers.get("ratelimit-reset")
)
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
if response.status_code == 404:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
@@ -171074,42 +181364,32 @@ async def delete_repository_tag(
deserialized = None
if cls:
- return cls(pipeline_response, deserialized, response_headers) # type: ignore
+ return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
- return deserialized # type: ignore
+ return cast(JSON, deserialized) # type: ignore
@distributed_trace_async
- async def list_repository_manifests(
- self,
- registry_name: str,
- repository_name: str,
- *,
- per_page: int = 20,
- page: int = 1,
- **kwargs: Any
- ) -> JSON:
+ async def get_options(self, **kwargs: Any) -> JSON:
# pylint: disable=line-too-long
- """List All Container Registry Repository Manifests.
+ """List Registry Options (Subscription Tiers and Available Regions).
- **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.**
+ **Note: This endpoint is deprecated and may be removed in a future version. There is no
+ alternative.****\\ Note: This endpoint is deprecated. Please use the ``/v2/registries``
+ endpoint instead.**
- To list all manifests in your container registry repository, send a GET
- request to ``/v2/registry/$REGISTRY_NAME/repositories/$REPOSITORY_NAME/digests``.
+ This endpoint serves to provide additional information as to which option values
+ are available when creating a container registry.
- Note that if your repository name contains ``/`` characters, it must be
- URL-encoded in the request URL. For example, to list manifests for
- ``registry.digitalocean.com/example/my/repo``\\ , the path would be
- ``/v2/registry/example/repositories/my%2Frepo/digests``.
+ There are multiple subscription tiers available for container registry. Each
+ tier allows a different number of image repositories to be created in your
+ registry, and has a different amount of storage and transfer included.
+
+ There are multiple regions available for container registry and controls
+ where your data is stored.
+
+ To list the available options, send a GET request to
+ ``/v2/registry/options``.
- :param registry_name: The name of a container registry. Required.
- :type registry_name: str
- :param repository_name: The name of a container registry repository. If the name contains ``/``
- characters, they must be URL-encoded, e.g. ``%2F``. Required.
- :type repository_name: str
- :keyword per_page: Number of items returned per page. Default value is 20.
- :paramtype per_page: int
- :keyword page: Which 'page' of paginated results to return. Default value is 1.
- :paramtype page: int
:return: JSON object
:rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
@@ -171119,50 +181399,43 @@ async def list_repository_manifests(
# response body for status code(s): 200
response == {
- "meta": {
- "total": 0 # Optional. Number of objects returned by the request.
- },
- "links": {
- "pages": {}
- },
- "manifests": [
- {
- "blobs": [
- {
- "compressed_size_bytes": 0, # Optional. The
- compressed size of the blob in bytes.
- "digest": "str" # Optional. The digest of
- the blob.
- }
- ],
- "compressed_size_bytes": 0, # Optional. The compressed size
- of the manifest in bytes.
- "digest": "str", # Optional. The manifest digest.
- "registry_name": "str", # Optional. The name of the
- container registry.
- "repository": "str", # Optional. The name of the repository.
- "size_bytes": 0, # Optional. The uncompressed size of the
- manifest in bytes (this size is calculated asynchronously so it may not
- be immediately available).
- "tags": [
- "str" # Optional. All tags associated with this
- manifest.
- ],
- "updated_at": "2020-02-20 00:00:00" # Optional. The time the
- manifest was last updated.
- }
- ]
- }
- # response body for status code(s): 404
- response == {
- "id": "str", # A short identifier corresponding to the HTTP status code
- returned. For example, the ID for a response returning a 404 status code would
- be "not_found.". Required.
- "message": "str", # A message providing additional information about the
- error, including details to help resolve it when possible. Required.
- "request_id": "str" # Optional. Optionally, some endpoints may include a
- request ID that should be provided when reporting bugs or opening support
- tickets to help identify the issue.
+ "options": {
+ "available_regions": [
+ "str" # Optional.
+ ],
+ "subscription_tiers": [
+ {
+ "allow_storage_overage": bool, # Optional. A boolean
+ indicating whether the subscription tier supports additional storage
+ above what is included in the base plan at an additional cost per GiB
+ used.
+ "eligibility_reasons": [
+ "str" # Optional. If your account is not
+ eligible to use a certain subscription tier, this will include a
+ list of reasons that prevent you from using the tier.
+ ],
+                        "eligible": bool, # Optional. A boolean indicating
+                        whether your account is eligible to use a certain subscription tier.
+ "included_bandwidth_bytes": 0, # Optional. The
+ amount of outbound data transfer included in the subscription tier in
+ bytes.
+ "included_repositories": 0, # Optional. The number
+ of repositories included in the subscription tier. ``0`` indicates
+ that the subscription tier includes unlimited repositories.
+ "included_storage_bytes": 0, # Optional. The amount
+ of storage included in the subscription tier in bytes.
+ "monthly_price_in_cents": 0, # Optional. The monthly
+ cost of the subscription tier in cents.
+ "name": "str", # Optional. The name of the
+ subscription tier.
+ "slug": "str", # Optional. The slug identifier of
+ the subscription tier.
+ "storage_overage_price_in_cents": 0 # Optional. The
+ price paid in cents per GiB for additional storage beyond what is
+ included in the subscription plan.
+ }
+ ]
+ }
}
"""
error_map: MutableMapping[int, Type[HttpResponseError]] = {
@@ -171183,11 +181456,7 @@ async def list_repository_manifests(
cls: ClsType[JSON] = kwargs.pop("cls", None)
- _request = build_registry_list_repository_manifests_request(
- registry_name=registry_name,
- repository_name=repository_name,
- per_page=per_page,
- page=page,
+ _request = build_registry_get_options_request(
headers=_headers,
params=_params,
)
@@ -171202,98 +181471,112 @@ async def list_repository_manifests(
response = pipeline_response.http_response
- if response.status_code not in [200, 404]:
+ if response.status_code not in [200]:
if _stream:
await response.read() # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
raise HttpResponseError(response=response)
response_headers = {}
- if response.status_code == 200:
- response_headers["ratelimit-limit"] = self._deserialize(
- "int", response.headers.get("ratelimit-limit")
- )
- response_headers["ratelimit-remaining"] = self._deserialize(
- "int", response.headers.get("ratelimit-remaining")
- )
- response_headers["ratelimit-reset"] = self._deserialize(
- "int", response.headers.get("ratelimit-reset")
- )
-
- if response.content:
- deserialized = response.json()
- else:
- deserialized = None
-
- if response.status_code == 404:
- response_headers["ratelimit-limit"] = self._deserialize(
- "int", response.headers.get("ratelimit-limit")
- )
- response_headers["ratelimit-remaining"] = self._deserialize(
- "int", response.headers.get("ratelimit-remaining")
- )
- response_headers["ratelimit-reset"] = self._deserialize(
- "int", response.headers.get("ratelimit-reset")
- )
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
- if response.content:
- deserialized = response.json()
- else:
- deserialized = None
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
if cls:
return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
return cast(JSON, deserialized) # type: ignore
- @distributed_trace_async
- async def delete_repository_manifest(
- self,
- registry_name: str,
- repository_name: str,
- manifest_digest: str,
- **kwargs: Any
- ) -> Optional[JSON]:
- # pylint: disable=line-too-long
- """Delete Container Registry Repository Manifest.
- **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.**
+class ReservedIPsOperations:
+ """
+ .. warning::
+ **DO NOT** instantiate this class directly.
- To delete a container repository manifest by digest, send a DELETE request to
- ``/v2/registry/$REGISTRY_NAME/repositories/$REPOSITORY_NAME/digests/$MANIFEST_DIGEST``.
+ Instead, you should access the following operations through
+ :class:`~pydo.aio.GeneratedClient`'s
+ :attr:`reserved_ips` attribute.
+ """
- Note that if your repository name contains ``/`` characters, it must be
- URL-encoded in the request URL. For example, to delete
- ``registry.digitalocean.com/example/my/repo@sha256:abcd``\\ , the path would be
- ``/v2/registry/example/repositories/my%2Frepo/digests/sha256:abcd``.
+ def __init__(self, *args, **kwargs) -> None:
+ input_args = list(args)
+ self._client = input_args.pop(0) if input_args else kwargs.pop("client")
+ self._config = input_args.pop(0) if input_args else kwargs.pop("config")
+ self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
+ self._deserialize = (
+ input_args.pop(0) if input_args else kwargs.pop("deserializer")
+ )
- A successful request will receive a 204 status code with no body in response.
- This indicates that the request was processed successfully.
+ @distributed_trace_async
+ async def list(self, *, per_page: int = 20, page: int = 1, **kwargs: Any) -> JSON:
+ # pylint: disable=line-too-long
+ """List All Reserved IPs.
- :param registry_name: The name of a container registry. Required.
- :type registry_name: str
- :param repository_name: The name of a container registry repository. If the name contains ``/``
- characters, they must be URL-encoded, e.g. ``%2F``. Required.
- :type repository_name: str
- :param manifest_digest: The manifest digest of a container registry repository tag. Required.
- :type manifest_digest: str
- :return: JSON object or None
- :rtype: JSON or None
+ To list all of the reserved IPs available on your account, send a GET request to
+ ``/v2/reserved_ips``.
+
+ :keyword per_page: Number of items returned per page. Default value is 20.
+ :paramtype per_page: int
+ :keyword page: Which 'page' of paginated results to return. Default value is 1.
+ :paramtype page: int
+ :return: JSON object
+ :rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
- # response body for status code(s): 404
+ # response body for status code(s): 200
response == {
- "id": "str", # A short identifier corresponding to the HTTP status code
- returned. For example, the ID for a response returning a 404 status code would
- be "not_found.". Required.
- "message": "str", # A message providing additional information about the
- error, including details to help resolve it when possible. Required.
- "request_id": "str" # Optional. Optionally, some endpoints may include a
- request ID that should be provided when reporting bugs or opening support
- tickets to help identify the issue.
+ "meta": {
+ "total": 0 # Optional. Number of objects returned by the request.
+ },
+ "links": {
+ "pages": {}
+ },
+ "reserved_ips": [
+ {
+ "droplet": {},
+ "ip": "str", # Optional. The public IP address of the
+ reserved IP. It also serves as its identifier.
+ "locked": bool, # Optional. A boolean value indicating
+ whether or not the reserved IP has pending actions preventing new ones
+ from being submitted.
+ "project_id": "str", # Optional. The UUID of the project to
+ which the reserved IP currently belongs.:code:`
`:code:`
`Requires
+ ``project:read`` scope.
+ "region": {
+ "available": bool, # This is a boolean value that
+ represents whether new Droplets can be created in this region.
+ Required.
+ "features": [
+ "str" # This attribute is set to an array
+ which contains features available in this region. Required.
+ ],
+ "name": "str", # The display name of the region.
+ This will be a full name that is used in the control panel and other
+ interfaces. Required.
+ "sizes": [
+ "str" # This attribute is set to an array
+ which contains the identifying slugs for the sizes available in
+ this region. sizes:read is required to view. Required.
+ ],
+ "slug": "str" # A human-readable string that is used
+ as a unique identifier for each region. Required.
+ }
+ }
+ ]
}
"""
error_map: MutableMapping[int, Type[HttpResponseError]] = {
@@ -171312,12 +181595,11 @@ async def delete_repository_manifest(
_headers = kwargs.pop("headers", {}) or {}
_params = kwargs.pop("params", {}) or {}
- cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None)
+ cls: ClsType[JSON] = kwargs.pop("cls", None)
- _request = build_registry_delete_repository_manifest_request(
- registry_name=registry_name,
- repository_name=repository_name,
- manifest_digest=manifest_digest,
+ _request = build_reserved_ips_list_request(
+ per_page=per_page,
+ page=page,
headers=_headers,
params=_params,
)
@@ -171332,87 +181614,52 @@ async def delete_repository_manifest(
response = pipeline_response.http_response
- if response.status_code not in [204, 404]:
+ if response.status_code not in [200]:
if _stream:
await response.read() # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
raise HttpResponseError(response=response)
- deserialized = None
response_headers = {}
- if response.status_code == 204:
- response_headers["ratelimit-limit"] = self._deserialize(
- "int", response.headers.get("ratelimit-limit")
- )
- response_headers["ratelimit-remaining"] = self._deserialize(
- "int", response.headers.get("ratelimit-remaining")
- )
- response_headers["ratelimit-reset"] = self._deserialize(
- "int", response.headers.get("ratelimit-reset")
- )
-
- if response.status_code == 404:
- response_headers["ratelimit-limit"] = self._deserialize(
- "int", response.headers.get("ratelimit-limit")
- )
- response_headers["ratelimit-remaining"] = self._deserialize(
- "int", response.headers.get("ratelimit-remaining")
- )
- response_headers["ratelimit-reset"] = self._deserialize(
- "int", response.headers.get("ratelimit-reset")
- )
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
- if response.content:
- deserialized = response.json()
- else:
- deserialized = None
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
if cls:
- return cls(pipeline_response, deserialized, response_headers) # type: ignore
+ return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
- return deserialized # type: ignore
+ return cast(JSON, deserialized) # type: ignore
@overload
- async def run_garbage_collection(
- self,
- registry_name: str,
- body: Optional[JSON] = None,
- *,
- content_type: str = "application/json",
- **kwargs: Any
+ async def create(
+ self, body: JSON, *, content_type: str = "application/json", **kwargs: Any
) -> JSON:
# pylint: disable=line-too-long
- """Start Garbage Collection.
-
- **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.**
+ """Create a New Reserved IP.
- Garbage collection enables users to clear out unreferenced blobs (layer &
- manifest data) after deleting one or more manifests from a repository. If
- there are no unreferenced blobs resulting from the deletion of one or more
- manifests, garbage collection is effectively a noop.
- `See here for more information
- `_
- about how and why you should clean up your container registry periodically.
+ On creation, a reserved IP must be either assigned to a Droplet or reserved to a region.
- To request a garbage collection run on your registry, send a POST request to
- ``/v2/registry/$REGISTRY_NAME/garbage-collection``. This will initiate the
- following sequence of events on your registry.
+ *
+ To create a new reserved IP assigned to a Droplet, send a POST
+ request to ``/v2/reserved_ips`` with the ``droplet_id`` attribute.
- * Set the registry to read-only mode, meaning no further write-scoped
- JWTs will be issued to registry clients. Existing write-scoped JWTs will
- continue to work until they expire which can take up to 15 minutes.
- * Wait until all existing write-scoped JWTs have expired.
- * Scan all registry manifests to determine which blobs are unreferenced.
- * Delete all unreferenced blobs from the registry.
- * Record the number of blobs deleted and bytes freed, mark the garbage
- collection status as ``success``.
- * Remove the read-only mode restriction from the registry, meaning write-scoped
- JWTs will once again be issued to registry clients.
+ *
+ To create a new reserved IP reserved to a region, send a POST request to
+ ``/v2/reserved_ips`` with the ``region`` attribute.
- :param registry_name: The name of a container registry. Required.
- :type registry_name: str
- :param body: Default value is None.
+ :param body: Required.
:type body: JSON
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
@@ -171425,87 +181672,83 @@ async def run_garbage_collection(
.. code-block:: python
# JSON input template you can fill out and use as your body input.
- body = {
- "type": "str" # Optional. Type of the garbage collection to run against this
- registry. Known values are: "untagged manifests only", "unreferenced blobs only",
- and "untagged manifests and unreferenced blobs".
- }
+ body = {}
- # response body for status code(s): 201
+ # response body for status code(s): 202
response == {
- "garbage_collection": {
- "blobs_deleted": 0, # Optional. The number of blobs deleted as a
- result of this garbage collection.
- "created_at": "2020-02-20 00:00:00", # Optional. The time the
- garbage collection was created.
- "freed_bytes": 0, # Optional. The number of bytes freed as a result
- of this garbage collection.
- "registry_name": "str", # Optional. The name of the container
- registry.
- "status": "str", # Optional. The current status of this garbage
- collection. Known values are: "requested", "waiting for write JWTs to
- expire", "scanning manifests", "deleting unreferenced blobs", "cancelling",
- "failed", "succeeded", and "cancelled".
- "updated_at": "2020-02-20 00:00:00", # Optional. The time the
- garbage collection was last updated.
- "uuid": "str" # Optional. A string specifying the UUID of the
- garbage collection.
+ "links": {
+ "actions": [
+ {
+ "href": "str", # Optional. A URL that can be used to
+ access the action.
+ "id": 0, # Optional. A unique numeric ID that can be
+ used to identify and reference an action.
+ "rel": "str" # Optional. A string specifying the
+ type of the related action.
+ }
+ ],
+ "droplets": [
+ {
+ "href": "str", # Optional. A URL that can be used to
+ access the action.
+ "id": 0, # Optional. A unique numeric ID that can be
+ used to identify and reference an action.
+ "rel": "str" # Optional. A string specifying the
+ type of the related action.
+ }
+ ]
+ },
+ "reserved_ip": {
+ "droplet": {},
+ "ip": "str", # Optional. The public IP address of the reserved IP.
+ It also serves as its identifier.
+ "locked": bool, # Optional. A boolean value indicating whether or
+ not the reserved IP has pending actions preventing new ones from being
+ submitted.
+ "project_id": "str", # Optional. The UUID of the project to which
+ the reserved IP currently belongs.:code:`
`:code:`
`Requires
+ ``project:read`` scope.
+ "region": {
+ "available": bool, # This is a boolean value that represents
+ whether new Droplets can be created in this region. Required.
+ "features": [
+ "str" # This attribute is set to an array which
+ contains features available in this region. Required.
+ ],
+ "name": "str", # The display name of the region. This will
+ be a full name that is used in the control panel and other interfaces.
+ Required.
+ "sizes": [
+ "str" # This attribute is set to an array which
+ contains the identifying slugs for the sizes available in this
+ region. sizes:read is required to view. Required.
+ ],
+ "slug": "str" # A human-readable string that is used as a
+ unique identifier for each region. Required.
+ }
}
}
- # response body for status code(s): 404
- response == {
- "id": "str", # A short identifier corresponding to the HTTP status code
- returned. For example, the ID for a response returning a 404 status code would
- be "not_found.". Required.
- "message": "str", # A message providing additional information about the
- error, including details to help resolve it when possible. Required.
- "request_id": "str" # Optional. Optionally, some endpoints may include a
- request ID that should be provided when reporting bugs or opening support
- tickets to help identify the issue.
- }
"""
@overload
- async def run_garbage_collection(
- self,
- registry_name: str,
- body: Optional[IO[bytes]] = None,
- *,
- content_type: str = "application/json",
- **kwargs: Any
+ async def create(
+ self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
) -> JSON:
# pylint: disable=line-too-long
- """Start Garbage Collection.
-
- **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.**
+ """Create a New Reserved IP.
- Garbage collection enables users to clear out unreferenced blobs (layer &
- manifest data) after deleting one or more manifests from a repository. If
- there are no unreferenced blobs resulting from the deletion of one or more
- manifests, garbage collection is effectively a noop.
- `See here for more information
- `_
- about how and why you should clean up your container registry periodically.
+ On creation, a reserved IP must be either assigned to a Droplet or reserved to a region.
- To request a garbage collection run on your registry, send a POST request to
- ``/v2/registry/$REGISTRY_NAME/garbage-collection``. This will initiate the
- following sequence of events on your registry.
+ *
+ To create a new reserved IP assigned to a Droplet, send a POST
+ request to ``/v2/reserved_ips`` with the ``droplet_id`` attribute.
- * Set the registry to read-only mode, meaning no further write-scoped
- JWTs will be issued to registry clients. Existing write-scoped JWTs will
- continue to work until they expire which can take up to 15 minutes.
- * Wait until all existing write-scoped JWTs have expired.
- * Scan all registry manifests to determine which blobs are unreferenced.
- * Delete all unreferenced blobs from the registry.
- * Record the number of blobs deleted and bytes freed, mark the garbage
- collection status as ``success``.
- * Remove the read-only mode restriction from the registry, meaning write-scoped
- JWTs will once again be issued to registry clients.
+ *
+ To create a new reserved IP reserved to a region, send a POST request to
+ ``/v2/reserved_ips`` with the ``region`` attribute.
- :param registry_name: The name of a container registry. Required.
- :type registry_name: str
- :param body: Default value is None.
+ :param body: Required.
:type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
@@ -171517,79 +181760,79 @@ async def run_garbage_collection(
Example:
.. code-block:: python
- # response body for status code(s): 201
+ # response body for status code(s): 202
response == {
- "garbage_collection": {
- "blobs_deleted": 0, # Optional. The number of blobs deleted as a
- result of this garbage collection.
- "created_at": "2020-02-20 00:00:00", # Optional. The time the
- garbage collection was created.
- "freed_bytes": 0, # Optional. The number of bytes freed as a result
- of this garbage collection.
- "registry_name": "str", # Optional. The name of the container
- registry.
- "status": "str", # Optional. The current status of this garbage
- collection. Known values are: "requested", "waiting for write JWTs to
- expire", "scanning manifests", "deleting unreferenced blobs", "cancelling",
- "failed", "succeeded", and "cancelled".
- "updated_at": "2020-02-20 00:00:00", # Optional. The time the
- garbage collection was last updated.
- "uuid": "str" # Optional. A string specifying the UUID of the
- garbage collection.
+ "links": {
+ "actions": [
+ {
+ "href": "str", # Optional. A URL that can be used to
+ access the action.
+ "id": 0, # Optional. A unique numeric ID that can be
+ used to identify and reference an action.
+ "rel": "str" # Optional. A string specifying the
+ type of the related action.
+ }
+ ],
+ "droplets": [
+ {
+ "href": "str", # Optional. A URL that can be used to
+ access the action.
+ "id": 0, # Optional. A unique numeric ID that can be
+ used to identify and reference an action.
+ "rel": "str" # Optional. A string specifying the
+ type of the related action.
+ }
+ ]
+ },
+ "reserved_ip": {
+ "droplet": {},
+ "ip": "str", # Optional. The public IP address of the reserved IP.
+ It also serves as its identifier.
+ "locked": bool, # Optional. A boolean value indicating whether or
+ not the reserved IP has pending actions preventing new ones from being
+ submitted.
+ "project_id": "str", # Optional. The UUID of the project to which
+ the reserved IP currently belongs.:code:`
`:code:`
`Requires
+ ``project:read`` scope.
+ "region": {
+ "available": bool, # This is a boolean value that represents
+ whether new Droplets can be created in this region. Required.
+ "features": [
+ "str" # This attribute is set to an array which
+ contains features available in this region. Required.
+ ],
+ "name": "str", # The display name of the region. This will
+ be a full name that is used in the control panel and other interfaces.
+ Required.
+ "sizes": [
+ "str" # This attribute is set to an array which
+ contains the identifying slugs for the sizes available in this
+ region. sizes:read is required to view. Required.
+ ],
+ "slug": "str" # A human-readable string that is used as a
+ unique identifier for each region. Required.
+ }
}
}
- # response body for status code(s): 404
- response == {
- "id": "str", # A short identifier corresponding to the HTTP status code
- returned. For example, the ID for a response returning a 404 status code would
- be "not_found.". Required.
- "message": "str", # A message providing additional information about the
- error, including details to help resolve it when possible. Required.
- "request_id": "str" # Optional. Optionally, some endpoints may include a
- request ID that should be provided when reporting bugs or opening support
- tickets to help identify the issue.
- }
"""
@distributed_trace_async
- async def run_garbage_collection(
- self,
- registry_name: str,
- body: Optional[Union[JSON, IO[bytes]]] = None,
- **kwargs: Any
- ) -> JSON:
+ async def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON:
# pylint: disable=line-too-long
- """Start Garbage Collection.
-
- **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.**
+ """Create a New Reserved IP.
- Garbage collection enables users to clear out unreferenced blobs (layer &
- manifest data) after deleting one or more manifests from a repository. If
- there are no unreferenced blobs resulting from the deletion of one or more
- manifests, garbage collection is effectively a noop.
- `See here for more information
- `_
- about how and why you should clean up your container registry periodically.
+ On creation, a reserved IP must be either assigned to a Droplet or reserved to a region.
- To request a garbage collection run on your registry, send a POST request to
- ``/v2/registry/$REGISTRY_NAME/garbage-collection``. This will initiate the
- following sequence of events on your registry.
+ *
+ To create a new reserved IP assigned to a Droplet, send a POST
+ request to ``/v2/reserved_ips`` with the ``droplet_id`` attribute.
- * Set the registry to read-only mode, meaning no further write-scoped
- JWTs will be issued to registry clients. Existing write-scoped JWTs will
- continue to work until they expire which can take up to 15 minutes.
- * Wait until all existing write-scoped JWTs have expired.
- * Scan all registry manifests to determine which blobs are unreferenced.
- * Delete all unreferenced blobs from the registry.
- * Record the number of blobs deleted and bytes freed, mark the garbage
- collection status as ``success``.
- * Remove the read-only mode restriction from the registry, meaning write-scoped
- JWTs will once again be issued to registry clients.
+ *
+ To create a new reserved IP reserved to a region, send a POST request to
+ ``/v2/reserved_ips`` with the ``region`` attribute.
- :param registry_name: The name of a container registry. Required.
- :type registry_name: str
- :param body: Is either a JSON type or a IO[bytes] type. Default value is None.
+ :param body: Is either a JSON type or a IO[bytes] type. Required.
:type body: JSON or IO[bytes]
:return: JSON object
:rtype: JSON
@@ -171599,44 +181842,62 @@ async def run_garbage_collection(
.. code-block:: python
# JSON input template you can fill out and use as your body input.
- body = {
- "type": "str" # Optional. Type of the garbage collection to run against this
- registry. Known values are: "untagged manifests only", "unreferenced blobs only",
- and "untagged manifests and unreferenced blobs".
- }
+ body = {}
- # response body for status code(s): 201
+ # response body for status code(s): 202
response == {
- "garbage_collection": {
- "blobs_deleted": 0, # Optional. The number of blobs deleted as a
- result of this garbage collection.
- "created_at": "2020-02-20 00:00:00", # Optional. The time the
- garbage collection was created.
- "freed_bytes": 0, # Optional. The number of bytes freed as a result
- of this garbage collection.
- "registry_name": "str", # Optional. The name of the container
- registry.
- "status": "str", # Optional. The current status of this garbage
- collection. Known values are: "requested", "waiting for write JWTs to
- expire", "scanning manifests", "deleting unreferenced blobs", "cancelling",
- "failed", "succeeded", and "cancelled".
- "updated_at": "2020-02-20 00:00:00", # Optional. The time the
- garbage collection was last updated.
- "uuid": "str" # Optional. A string specifying the UUID of the
- garbage collection.
+ "links": {
+ "actions": [
+ {
+ "href": "str", # Optional. A URL that can be used to
+ access the action.
+ "id": 0, # Optional. A unique numeric ID that can be
+ used to identify and reference an action.
+ "rel": "str" # Optional. A string specifying the
+ type of the related action.
+ }
+ ],
+ "droplets": [
+ {
+ "href": "str", # Optional. A URL that can be used to
+ access the action.
+ "id": 0, # Optional. A unique numeric ID that can be
+ used to identify and reference an action.
+ "rel": "str" # Optional. A string specifying the
+ type of the related action.
+ }
+ ]
+ },
+ "reserved_ip": {
+ "droplet": {},
+ "ip": "str", # Optional. The public IP address of the reserved IP.
+ It also serves as its identifier.
+ "locked": bool, # Optional. A boolean value indicating whether or
+ not the reserved IP has pending actions preventing new ones from being
+ submitted.
+ "project_id": "str", # Optional. The UUID of the project to which
+ the reserved IP currently belongs.:code:`
`:code:`
`Requires
+ ``project:read`` scope.
+ "region": {
+ "available": bool, # This is a boolean value that represents
+ whether new Droplets can be created in this region. Required.
+ "features": [
+ "str" # This attribute is set to an array which
+ contains features available in this region. Required.
+ ],
+ "name": "str", # The display name of the region. This will
+ be a full name that is used in the control panel and other interfaces.
+ Required.
+ "sizes": [
+ "str" # This attribute is set to an array which
+ contains the identifying slugs for the sizes available in this
+ region. sizes:read is required to view. Required.
+ ],
+ "slug": "str" # A human-readable string that is used as a
+ unique identifier for each region. Required.
+ }
}
}
- # response body for status code(s): 404
- response == {
- "id": "str", # A short identifier corresponding to the HTTP status code
- returned. For example, the ID for a response returning a 404 status code would
- be "not_found.". Required.
- "message": "str", # A message providing additional information about the
- error, including details to help resolve it when possible. Required.
- "request_id": "str" # Optional. Optionally, some endpoints may include a
- request ID that should be provided when reporting bugs or opening support
- tickets to help identify the issue.
- }
"""
error_map: MutableMapping[int, Type[HttpResponseError]] = {
404: ResourceNotFoundError,
@@ -171665,13 +181926,9 @@ async def run_garbage_collection(
if isinstance(body, (IOBase, bytes)):
_content = body
else:
- if body is not None:
- _json = body
- else:
- _json = None
+ _json = body
- _request = build_registry_run_garbage_collection_request(
- registry_name=registry_name,
+ _request = build_reserved_ips_create_request(
content_type=content_type,
json=_json,
content=_content,
@@ -171689,14 +181946,136 @@ async def run_garbage_collection(
response = pipeline_response.http_response
- if response.status_code not in [201, 404]:
+ if response.status_code not in [202]:
if _stream:
await response.read() # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
raise HttpResponseError(response=response)
response_headers = {}
- if response.status_code == 201:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if cls:
+ return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
+
+ return cast(JSON, deserialized) # type: ignore
+
+ @distributed_trace_async
+ async def get(self, reserved_ip: str, **kwargs: Any) -> JSON:
+ # pylint: disable=line-too-long
+ """Retrieve an Existing Reserved IP.
+
+ To show information about a reserved IP, send a GET request to
+ ``/v2/reserved_ips/$RESERVED_IP_ADDR``.
+
+ :param reserved_ip: A reserved IP address. Required.
+ :type reserved_ip: str
+ :return: JSON object
+ :rtype: JSON
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # response body for status code(s): 200
+ response == {
+ "reserved_ip": {
+ "droplet": {},
+ "ip": "str", # Optional. The public IP address of the reserved IP.
+ It also serves as its identifier.
+ "locked": bool, # Optional. A boolean value indicating whether or
+ not the reserved IP has pending actions preventing new ones from being
+ submitted.
+ "project_id": "str", # Optional. The UUID of the project to which
+ the reserved IP currently belongs.:code:`
`:code:`
`Requires
+ ``project:read`` scope.
+ "region": {
+ "available": bool, # This is a boolean value that represents
+ whether new Droplets can be created in this region. Required.
+ "features": [
+ "str" # This attribute is set to an array which
+ contains features available in this region. Required.
+ ],
+ "name": "str", # The display name of the region. This will
+ be a full name that is used in the control panel and other interfaces.
+ Required.
+ "sizes": [
+ "str" # This attribute is set to an array which
+ contains the identifying slugs for the sizes available in this
+ region. sizes:read is required to view. Required.
+ ],
+ "slug": "str" # A human-readable string that is used as a
+ unique identifier for each region. Required.
+ }
+ }
+ }
+ # response body for status code(s): 404
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
+ """
+ error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ 401: cast(
+ Type[HttpResponseError],
+ lambda response: ClientAuthenticationError(response=response),
+ ),
+ 429: HttpResponseError,
+ 500: HttpResponseError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = kwargs.pop("params", {}) or {}
+
+ cls: ClsType[JSON] = kwargs.pop("cls", None)
+
+ _request = build_reserved_ips_get_request(
+ reserved_ip=reserved_ip,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = (
+ await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 404]:
+ if _stream:
+ await response.read() # Load the body in memory and close the socket
+ map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
+ raise HttpResponseError(response=response)
+
+ response_headers = {}
+ if response.status_code == 200:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
)
@@ -171734,45 +182113,25 @@ async def run_garbage_collection(
return cast(JSON, deserialized) # type: ignore
@distributed_trace_async
- async def get_garbage_collection(self, registry_name: str, **kwargs: Any) -> JSON:
+ async def delete(self, reserved_ip: str, **kwargs: Any) -> Optional[JSON]:
# pylint: disable=line-too-long
- """Get Active Garbage Collection.
+ """Delete a Reserved IP.
- **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.**
+ To delete a reserved IP and remove it from your account, send a DELETE request
+ to ``/v2/reserved_ips/$RESERVED_IP_ADDR``.
- To get information about the currently-active garbage collection
- for a registry, send a GET request to ``/v2/registry/$REGISTRY_NAME/garbage-collection``.
+ A successful request will receive a 204 status code with no body in response.
+ This indicates that the request was processed successfully.
- :param registry_name: The name of a container registry. Required.
- :type registry_name: str
- :return: JSON object
- :rtype: JSON
+ :param reserved_ip: A reserved IP address. Required.
+ :type reserved_ip: str
+ :return: JSON object or None
+ :rtype: JSON or None
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
- # response body for status code(s): 200
- response == {
- "garbage_collection": {
- "blobs_deleted": 0, # Optional. The number of blobs deleted as a
- result of this garbage collection.
- "created_at": "2020-02-20 00:00:00", # Optional. The time the
- garbage collection was created.
- "freed_bytes": 0, # Optional. The number of bytes freed as a result
- of this garbage collection.
- "registry_name": "str", # Optional. The name of the container
- registry.
- "status": "str", # Optional. The current status of this garbage
- collection. Known values are: "requested", "waiting for write JWTs to
- expire", "scanning manifests", "deleting unreferenced blobs", "cancelling",
- "failed", "succeeded", and "cancelled".
- "updated_at": "2020-02-20 00:00:00", # Optional. The time the
- garbage collection was last updated.
- "uuid": "str" # Optional. A string specifying the UUID of the
- garbage collection.
- }
- }
# response body for status code(s): 404
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
@@ -171801,10 +182160,10 @@ async def get_garbage_collection(self, registry_name: str, **kwargs: Any) -> JSO
_headers = kwargs.pop("headers", {}) or {}
_params = kwargs.pop("params", {}) or {}
- cls: ClsType[JSON] = kwargs.pop("cls", None)
+ cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None)
- _request = build_registry_get_garbage_collection_request(
- registry_name=registry_name,
+ _request = build_reserved_ips_delete_request(
+ reserved_ip=reserved_ip,
headers=_headers,
params=_params,
)
@@ -171819,14 +182178,15 @@ async def get_garbage_collection(self, registry_name: str, **kwargs: Any) -> JSO
response = pipeline_response.http_response
- if response.status_code not in [200, 404]:
+ if response.status_code not in [204, 404]:
if _stream:
await response.read() # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
raise HttpResponseError(response=response)
+ deserialized = None
response_headers = {}
- if response.status_code == 200:
+ if response.status_code == 204:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
)
@@ -171837,11 +182197,6 @@ async def get_garbage_collection(self, registry_name: str, **kwargs: Any) -> JSO
"int", response.headers.get("ratelimit-reset")
)
- if response.content:
- deserialized = response.json()
- else:
- deserialized = None
-
if response.status_code == 404:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
@@ -171859,28 +182214,40 @@ async def get_garbage_collection(self, registry_name: str, **kwargs: Any) -> JSO
deserialized = None
if cls:
- return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- return cast(JSON, deserialized) # type: ignore
+ return deserialized # type: ignore
+
+
+class ReservedIPsActionsOperations:
+ """
+ .. warning::
+ **DO NOT** instantiate this class directly.
+
+ Instead, you should access the following operations through
+ :class:`~pydo.aio.GeneratedClient`'s
+ :attr:`reserved_ips_actions` attribute.
+ """
+
+ def __init__(self, *args, **kwargs) -> None:
+ input_args = list(args)
+ self._client = input_args.pop(0) if input_args else kwargs.pop("client")
+ self._config = input_args.pop(0) if input_args else kwargs.pop("config")
+ self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
+ self._deserialize = (
+ input_args.pop(0) if input_args else kwargs.pop("deserializer")
+ )
@distributed_trace_async
- async def list_garbage_collections(
- self, registry_name: str, *, per_page: int = 20, page: int = 1, **kwargs: Any
- ) -> JSON:
+ async def list(self, reserved_ip: str, **kwargs: Any) -> JSON:
# pylint: disable=line-too-long
- """List Garbage Collections.
-
- **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.**
+ """List All Actions for a Reserved IP.
- To get information about past garbage collections for a registry,
- send a GET request to ``/v2/registry/$REGISTRY_NAME/garbage-collections``.
+ To retrieve all actions that have been executed on a reserved IP, send a GET request to
+ ``/v2/reserved_ips/$RESERVED_IP/actions``.
- :param registry_name: The name of a container registry. Required.
- :type registry_name: str
- :keyword per_page: Number of items returned per page. Default value is 20.
- :paramtype per_page: int
- :keyword page: Which 'page' of paginated results to return. Default value is 1.
- :paramtype page: int
+ :param reserved_ip: A reserved IP address. Required.
+ :type reserved_ip: str
:return: JSON object
:rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
@@ -171890,26 +182257,56 @@ async def list_garbage_collections(
# response body for status code(s): 200
response == {
- "garbage_collections": [
+ "meta": {
+ "total": 0 # Optional. Number of objects returned by the request.
+ },
+ "actions": [
{
- "blobs_deleted": 0, # Optional. The number of blobs deleted
- as a result of this garbage collection.
- "created_at": "2020-02-20 00:00:00", # Optional. The time
- the garbage collection was created.
- "freed_bytes": 0, # Optional. The number of bytes freed as a
- result of this garbage collection.
- "registry_name": "str", # Optional. The name of the
- container registry.
- "status": "str", # Optional. The current status of this
- garbage collection. Known values are: "requested", "waiting for write
- JWTs to expire", "scanning manifests", "deleting unreferenced blobs",
- "cancelling", "failed", "succeeded", and "cancelled".
- "updated_at": "2020-02-20 00:00:00", # Optional. The time
- the garbage collection was last updated.
- "uuid": "str" # Optional. A string specifying the UUID of
- the garbage collection.
+ "completed_at": "2020-02-20 00:00:00", # Optional. A time
+ value given in ISO8601 combined date and time format that represents when
+ the action was completed.
+ "id": 0, # Optional. A unique numeric ID that can be used to
+ identify and reference an action.
+ "region": {
+ "available": bool, # This is a boolean value that
+ represents whether new Droplets can be created in this region.
+ Required.
+ "features": [
+ "str" # This attribute is set to an array
+ which contains features available in this region. Required.
+ ],
+ "name": "str", # The display name of the region.
+ This will be a full name that is used in the control panel and other
+ interfaces. Required.
+ "sizes": [
+ "str" # This attribute is set to an array
+ which contains the identifying slugs for the sizes available in
+ this region. sizes:read is required to view. Required.
+ ],
+ "slug": "str" # A human-readable string that is used
+ as a unique identifier for each region. Required.
+ },
+ "region_slug": "str", # Optional. A human-readable string
+ that is used as a unique identifier for each region.
+ "resource_id": 0, # Optional. A unique identifier for the
+ resource that the action is associated with.
+ "resource_type": "str", # Optional. The type of resource
+ that the action is associated with.
+ "started_at": "2020-02-20 00:00:00", # Optional. A time
+ value given in ISO8601 combined date and time format that represents when
+ the action was initiated.
+ "status": "in-progress", # Optional. Default value is
+ "in-progress". The current status of the action. This can be
+ "in-progress", "completed", or "errored". Known values are:
+ "in-progress", "completed", and "errored".
+ "type": "str" # Optional. This is the type of action that
+ the object represents. For example, this could be "transfer" to represent
+ the state of an image transfer action.
}
- ]
+ ],
+ "links": {
+ "pages": {}
+ }
}
# response body for status code(s): 404
response == {
@@ -171941,10 +182338,8 @@ async def list_garbage_collections(
cls: ClsType[JSON] = kwargs.pop("cls", None)
- _request = build_registry_list_garbage_collections_request(
- registry_name=registry_name,
- per_page=per_page,
- page=page,
+ _request = build_reserved_ips_actions_list_request(
+ reserved_ip=reserved_ip,
headers=_headers,
params=_params,
)
@@ -172004,29 +182399,35 @@ async def list_garbage_collections(
return cast(JSON, deserialized) # type: ignore
@overload
- async def update_garbage_collection(
+ async def post(
self,
- registry_name: str,
- garbage_collection_uuid: str,
- body: JSON,
+ reserved_ip: str,
+ body: Optional[JSON] = None,
*,
content_type: str = "application/json",
**kwargs: Any
) -> JSON:
# pylint: disable=line-too-long
- """Update Garbage Collection.
+ """Initiate a Reserved IP Action.
- **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.**
+ To initiate an action on a reserved IP send a POST request to
+ ``/v2/reserved_ips/$RESERVED_IP/actions``. In the JSON body to the request,
+ set the ``type`` attribute to one of the supported action types:
- To cancel the currently-active garbage collection for a registry,
- send a PUT request to ``/v2/registry/$REGISTRY_NAME/garbage-collection/$GC_UUID``
- and specify one or more of the attributes below.
+ .. list-table::
+ :header-rows: 1
- :param registry_name: The name of a container registry. Required.
- :type registry_name: str
- :param garbage_collection_uuid: The UUID of a garbage collection run. Required.
- :type garbage_collection_uuid: str
- :param body: Required.
+ * - Action
+ - Details
+ * - ``assign``
+ - Assigns a reserved IP to a Droplet
+ * - ``unassign``
+ - Unassigns a reserved IP from a Droplet.
+
+ :param reserved_ip: A reserved IP address. Required.
+ :type reserved_ip: str
+ :param body: The ``type`` attribute set in the request body will specify the action that
+ will be taken on the reserved IP. Default value is None.
:type body: JSON
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
@@ -172039,30 +182440,51 @@ async def update_garbage_collection(
.. code-block:: python
# JSON input template you can fill out and use as your body input.
- body = {
- "cancel": bool # Optional. A boolean value indicating that the garbage
- collection should be cancelled.
- }
+ body = {}
- # response body for status code(s): 200
+ # response body for status code(s): 201
response == {
- "garbage_collection": {
- "blobs_deleted": 0, # Optional. The number of blobs deleted as a
- result of this garbage collection.
- "created_at": "2020-02-20 00:00:00", # Optional. The time the
- garbage collection was created.
- "freed_bytes": 0, # Optional. The number of bytes freed as a result
- of this garbage collection.
- "registry_name": "str", # Optional. The name of the container
- registry.
- "status": "str", # Optional. The current status of this garbage
- collection. Known values are: "requested", "waiting for write JWTs to
- expire", "scanning manifests", "deleting unreferenced blobs", "cancelling",
- "failed", "succeeded", and "cancelled".
- "updated_at": "2020-02-20 00:00:00", # Optional. The time the
- garbage collection was last updated.
- "uuid": "str" # Optional. A string specifying the UUID of the
- garbage collection.
+ "action": {
+ "completed_at": "2020-02-20 00:00:00", # Optional. A time value
+ given in ISO8601 combined date and time format that represents when the
+ action was completed.
+ "id": 0, # Optional. A unique numeric ID that can be used to
+ identify and reference an action.
+ "project_id": "str", # Optional. The UUID of the project to which
+ the reserved IP currently belongs.
+ "region": {
+ "available": bool, # This is a boolean value that represents
+ whether new Droplets can be created in this region. Required.
+ "features": [
+ "str" # This attribute is set to an array which
+ contains features available in this region. Required.
+ ],
+ "name": "str", # The display name of the region. This will
+ be a full name that is used in the control panel and other interfaces.
+ Required.
+ "sizes": [
+ "str" # This attribute is set to an array which
+ contains the identifying slugs for the sizes available in this
+ region. sizes:read is required to view. Required.
+ ],
+ "slug": "str" # A human-readable string that is used as a
+ unique identifier for each region. Required.
+ },
+ "region_slug": "str", # Optional. A human-readable string that is
+ used as a unique identifier for each region.
+ "resource_id": 0, # Optional. A unique identifier for the resource
+ that the action is associated with.
+ "resource_type": "str", # Optional. The type of resource that the
+ action is associated with.
+ "started_at": "2020-02-20 00:00:00", # Optional. A time value given
+ in ISO8601 combined date and time format that represents when the action was
+ initiated.
+ "status": "in-progress", # Optional. Default value is "in-progress".
+ The current status of the action. This can be "in-progress", "completed", or
+ "errored". Known values are: "in-progress", "completed", and "errored".
+ "type": "str" # Optional. This is the type of action that the object
+ represents. For example, this could be "transfer" to represent the state of
+ an image transfer action.
}
}
# response body for status code(s): 404
@@ -172079,29 +182501,35 @@ async def update_garbage_collection(
"""
@overload
- async def update_garbage_collection(
+ async def post(
self,
- registry_name: str,
- garbage_collection_uuid: str,
- body: IO[bytes],
+ reserved_ip: str,
+ body: Optional[IO[bytes]] = None,
*,
content_type: str = "application/json",
**kwargs: Any
) -> JSON:
# pylint: disable=line-too-long
- """Update Garbage Collection.
+ """Initiate a Reserved IP Action.
- **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.**
+ To initiate an action on a reserved IP send a POST request to
+ ``/v2/reserved_ips/$RESERVED_IP/actions``. In the JSON body to the request,
+ set the ``type`` attribute to one of the supported action types:
- To cancel the currently-active garbage collection for a registry,
- send a PUT request to ``/v2/registry/$REGISTRY_NAME/garbage-collection/$GC_UUID``
- and specify one or more of the attributes below.
+ .. list-table::
+ :header-rows: 1
- :param registry_name: The name of a container registry. Required.
- :type registry_name: str
- :param garbage_collection_uuid: The UUID of a garbage collection run. Required.
- :type garbage_collection_uuid: str
- :param body: Required.
+ * - Action
+ - Details
+ * - ``assign``
+ - Assigns a reserved IP to a Droplet
+ * - ``unassign``
+ - Unassigns a reserved IP from a Droplet.
+
+ :param reserved_ip: A reserved IP address. Required.
+ :type reserved_ip: str
+ :param body: The ``type`` attribute set in the request body will specify the action that
+ will be taken on the reserved IP. Default value is None.
:type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
@@ -172113,25 +182541,49 @@ async def update_garbage_collection(
Example:
.. code-block:: python
- # response body for status code(s): 200
+ # response body for status code(s): 201
response == {
- "garbage_collection": {
- "blobs_deleted": 0, # Optional. The number of blobs deleted as a
- result of this garbage collection.
- "created_at": "2020-02-20 00:00:00", # Optional. The time the
- garbage collection was created.
- "freed_bytes": 0, # Optional. The number of bytes freed as a result
- of this garbage collection.
- "registry_name": "str", # Optional. The name of the container
- registry.
- "status": "str", # Optional. The current status of this garbage
- collection. Known values are: "requested", "waiting for write JWTs to
- expire", "scanning manifests", "deleting unreferenced blobs", "cancelling",
- "failed", "succeeded", and "cancelled".
- "updated_at": "2020-02-20 00:00:00", # Optional. The time the
- garbage collection was last updated.
- "uuid": "str" # Optional. A string specifying the UUID of the
- garbage collection.
+ "action": {
+ "completed_at": "2020-02-20 00:00:00", # Optional. A time value
+ given in ISO8601 combined date and time format that represents when the
+ action was completed.
+ "id": 0, # Optional. A unique numeric ID that can be used to
+ identify and reference an action.
+ "project_id": "str", # Optional. The UUID of the project to which
+ the reserved IP currently belongs.
+ "region": {
+ "available": bool, # This is a boolean value that represents
+ whether new Droplets can be created in this region. Required.
+ "features": [
+ "str" # This attribute is set to an array which
+ contains features available in this region. Required.
+ ],
+ "name": "str", # The display name of the region. This will
+ be a full name that is used in the control panel and other interfaces.
+ Required.
+ "sizes": [
+ "str" # This attribute is set to an array which
+ contains the identifying slugs for the sizes available in this
+ region. sizes:read is required to view. Required.
+ ],
+ "slug": "str" # A human-readable string that is used as a
+ unique identifier for each region. Required.
+ },
+ "region_slug": "str", # Optional. A human-readable string that is
+ used as a unique identifier for each region.
+ "resource_id": 0, # Optional. A unique identifier for the resource
+ that the action is associated with.
+ "resource_type": "str", # Optional. The type of resource that the
+ action is associated with.
+ "started_at": "2020-02-20 00:00:00", # Optional. A time value given
+ in ISO8601 combined date and time format that represents when the action was
+ initiated.
+ "status": "in-progress", # Optional. Default value is "in-progress".
+ The current status of the action. This can be "in-progress", "completed", or
+ "errored". Known values are: "in-progress", "completed", and "errored".
+ "type": "str" # Optional. This is the type of action that the object
+ represents. For example, this could be "transfer" to represent the state of
+ an image transfer action.
}
}
# response body for status code(s): 404
@@ -172148,27 +182600,34 @@ async def update_garbage_collection(
"""
@distributed_trace_async
- async def update_garbage_collection(
+ async def post(
self,
- registry_name: str,
- garbage_collection_uuid: str,
- body: Union[JSON, IO[bytes]],
+ reserved_ip: str,
+ body: Optional[Union[JSON, IO[bytes]]] = None,
**kwargs: Any
) -> JSON:
# pylint: disable=line-too-long
- """Update Garbage Collection.
+ """Initiate a Reserved IP Action.
- **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.**
+ To initiate an action on a reserved IP send a POST request to
+ ``/v2/reserved_ips/$RESERVED_IP/actions``. In the JSON body to the request,
+ set the ``type`` attribute to one of the supported action types:
- To cancel the currently-active garbage collection for a registry,
- send a PUT request to ``/v2/registry/$REGISTRY_NAME/garbage-collection/$GC_UUID``
- and specify one or more of the attributes below.
+ .. list-table::
+ :header-rows: 1
- :param registry_name: The name of a container registry. Required.
- :type registry_name: str
- :param garbage_collection_uuid: The UUID of a garbage collection run. Required.
- :type garbage_collection_uuid: str
- :param body: Is either a JSON type or a IO[bytes] type. Required.
+ * - Action
+ - Details
+ * - ``assign``
+ - Assigns a reserved IP to a Droplet
+ * - ``unassign``
+ - Unassigns a reserved IP from a Droplet.
+
+ :param reserved_ip: A reserved IP address. Required.
+ :type reserved_ip: str
+ :param body: The ``type`` attribute set in the request body will specify the action that
+ will be taken on the reserved IP. Is either a JSON type or a IO[bytes] type. Default value is
+ None.
:type body: JSON or IO[bytes]
:return: JSON object
:rtype: JSON
@@ -172178,30 +182637,51 @@ async def update_garbage_collection(
.. code-block:: python
# JSON input template you can fill out and use as your body input.
- body = {
- "cancel": bool # Optional. A boolean value indicating that the garbage
- collection should be cancelled.
- }
+ body = {}
- # response body for status code(s): 200
+ # response body for status code(s): 201
response == {
- "garbage_collection": {
- "blobs_deleted": 0, # Optional. The number of blobs deleted as a
- result of this garbage collection.
- "created_at": "2020-02-20 00:00:00", # Optional. The time the
- garbage collection was created.
- "freed_bytes": 0, # Optional. The number of bytes freed as a result
- of this garbage collection.
- "registry_name": "str", # Optional. The name of the container
- registry.
- "status": "str", # Optional. The current status of this garbage
- collection. Known values are: "requested", "waiting for write JWTs to
- expire", "scanning manifests", "deleting unreferenced blobs", "cancelling",
- "failed", "succeeded", and "cancelled".
- "updated_at": "2020-02-20 00:00:00", # Optional. The time the
- garbage collection was last updated.
- "uuid": "str" # Optional. A string specifying the UUID of the
- garbage collection.
+ "action": {
+ "completed_at": "2020-02-20 00:00:00", # Optional. A time value
+ given in ISO8601 combined date and time format that represents when the
+ action was completed.
+ "id": 0, # Optional. A unique numeric ID that can be used to
+ identify and reference an action.
+ "project_id": "str", # Optional. The UUID of the project to which
+ the reserved IP currently belongs.
+ "region": {
+ "available": bool, # This is a boolean value that represents
+ whether new Droplets can be created in this region. Required.
+ "features": [
+ "str" # This attribute is set to an array which
+ contains features available in this region. Required.
+ ],
+ "name": "str", # The display name of the region. This will
+ be a full name that is used in the control panel and other interfaces.
+ Required.
+ "sizes": [
+ "str" # This attribute is set to an array which
+ contains the identifying slugs for the sizes available in this
+ region. sizes:read is required to view. Required.
+ ],
+ "slug": "str" # A human-readable string that is used as a
+ unique identifier for each region. Required.
+ },
+ "region_slug": "str", # Optional. A human-readable string that is
+ used as a unique identifier for each region.
+ "resource_id": 0, # Optional. A unique identifier for the resource
+ that the action is associated with.
+ "resource_type": "str", # Optional. The type of resource that the
+ action is associated with.
+ "started_at": "2020-02-20 00:00:00", # Optional. A time value given
+ in ISO8601 combined date and time format that represents when the action was
+ initiated.
+ "status": "in-progress", # Optional. Default value is "in-progress".
+ The current status of the action. This can be "in-progress", "completed", or
+ "errored". Known values are: "in-progress", "completed", and "errored".
+ "type": "str" # Optional. This is the type of action that the object
+ represents. For example, this could be "transfer" to represent the state of
+ an image transfer action.
}
}
# response body for status code(s): 404
@@ -172243,11 +182723,13 @@ async def update_garbage_collection(
if isinstance(body, (IOBase, bytes)):
_content = body
else:
- _json = body
+ if body is not None:
+ _json = body
+ else:
+ _json = None
- _request = build_registry_update_garbage_collection_request(
- registry_name=registry_name,
- garbage_collection_uuid=garbage_collection_uuid,
+ _request = build_reserved_ips_actions_post_request(
+ reserved_ip=reserved_ip,
content_type=content_type,
json=_json,
content=_content,
@@ -172265,14 +182747,14 @@ async def update_garbage_collection(
response = pipeline_response.http_response
- if response.status_code not in [200, 404]:
+ if response.status_code not in [201, 404]:
if _stream:
await response.read() # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
raise HttpResponseError(response=response)
response_headers = {}
- if response.status_code == 200:
+ if response.status_code == 201:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
)
@@ -172310,27 +182792,18 @@ async def update_garbage_collection(
return cast(JSON, deserialized) # type: ignore
@distributed_trace_async
- async def get_options(self, **kwargs: Any) -> JSON:
+ async def get(self, reserved_ip: str, action_id: int, **kwargs: Any) -> JSON:
# pylint: disable=line-too-long
- """List Registry Options (Subscription Tiers and Available Regions).
-
- **Note: This endpoint is deprecated and may be removed in a future version. There is no
- alternative.****\\ Note: This endpoint is deprecated. Please use the ``/v2/registries``
- endpoint instead.**
-
- This endpoint serves to provide additional information as to which option values
- are available when creating a container registry.
-
- There are multiple subscription tiers available for container registry. Each
- tier allows a different number of image repositories to be created in your
- registry, and has a different amount of storage and transfer included.
-
- There are multiple regions available for container registry and controls
- where your data is stored.
+ """Retrieve an Existing Reserved IP Action.
- To list the available options, send a GET request to
- ``/v2/registry/options``.
+ To retrieve the status of a reserved IP action, send a GET request to
+ ``/v2/reserved_ips/$RESERVED_IP/actions/$ACTION_ID``.
+ :param reserved_ip: A reserved IP address. Required.
+ :type reserved_ip: str
+ :param action_id: A unique numeric ID that can be used to identify and reference an action.
+ Required.
+ :type action_id: int
:return: JSON object
:rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
@@ -172340,44 +182813,60 @@ async def get_options(self, **kwargs: Any) -> JSON:
# response body for status code(s): 200
response == {
- "options": {
- "available_regions": [
- "str" # Optional.
- ],
- "subscription_tiers": [
- {
- "allow_storage_overage": bool, # Optional. A boolean
- indicating whether the subscription tier supports additional storage
- above what is included in the base plan at an additional cost per GiB
- used.
- "eligibility_reasons": [
- "str" # Optional. If your account is not
- eligible to use a certain subscription tier, this will include a
- list of reasons that prevent you from using the tier.
- ],
- "eligible": bool, # Optional. A boolean indicating
- whether your account it eligible to use a certain subscription tier.
- "included_bandwidth_bytes": 0, # Optional. The
- amount of outbound data transfer included in the subscription tier in
- bytes.
- "included_repositories": 0, # Optional. The number
- of repositories included in the subscription tier. ``0`` indicates
- that the subscription tier includes unlimited repositories.
- "included_storage_bytes": 0, # Optional. The amount
- of storage included in the subscription tier in bytes.
- "monthly_price_in_cents": 0, # Optional. The monthly
- cost of the subscription tier in cents.
- "name": "str", # Optional. The name of the
- subscription tier.
- "slug": "str", # Optional. The slug identifier of
- the subscription tier.
- "storage_overage_price_in_cents": 0 # Optional. The
- price paid in cents per GiB for additional storage beyond what is
- included in the subscription plan.
- }
- ]
+ "action": {
+ "completed_at": "2020-02-20 00:00:00", # Optional. A time value
+ given in ISO8601 combined date and time format that represents when the
+ action was completed.
+ "id": 0, # Optional. A unique numeric ID that can be used to
+ identify and reference an action.
+ "project_id": "str", # Optional. The UUID of the project to which
+ the reserved IP currently belongs.
+ "region": {
+ "available": bool, # This is a boolean value that represents
+ whether new Droplets can be created in this region. Required.
+ "features": [
+ "str" # This attribute is set to an array which
+ contains features available in this region. Required.
+ ],
+ "name": "str", # The display name of the region. This will
+ be a full name that is used in the control panel and other interfaces.
+ Required.
+ "sizes": [
+ "str" # This attribute is set to an array which
+ contains the identifying slugs for the sizes available in this
+ region. sizes:read is required to view. Required.
+ ],
+ "slug": "str" # A human-readable string that is used as a
+ unique identifier for each region. Required.
+ },
+ "region_slug": "str", # Optional. A human-readable string that is
+ used as a unique identifier for each region.
+ "resource_id": 0, # Optional. A unique identifier for the resource
+ that the action is associated with.
+ "resource_type": "str", # Optional. The type of resource that the
+ action is associated with.
+ "started_at": "2020-02-20 00:00:00", # Optional. A time value given
+ in ISO8601 combined date and time format that represents when the action was
+ initiated.
+ "status": "in-progress", # Optional. Default value is "in-progress".
+ The current status of the action. This can be "in-progress", "completed", or
+ "errored". Known values are: "in-progress", "completed", and "errored".
+ "type": "str" # Optional. This is the type of action that the object
+ represents. For example, this could be "transfer" to represent the state of
+ an image transfer action.
}
}
+ # response body for status code(s): 404
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
"""
error_map: MutableMapping[int, Type[HttpResponseError]] = {
404: ResourceNotFoundError,
@@ -172397,7 +182886,9 @@ async def get_options(self, **kwargs: Any) -> JSON:
cls: ClsType[JSON] = kwargs.pop("cls", None)
- _request = build_registry_get_options_request(
+ _request = build_reserved_ips_actions_get_request(
+ reserved_ip=reserved_ip,
+ action_id=action_id,
headers=_headers,
params=_params,
)
@@ -172412,27 +182903,44 @@ async def get_options(self, **kwargs: Any) -> JSON:
response = pipeline_response.http_response
- if response.status_code not in [200]:
+ if response.status_code not in [200, 404]:
if _stream:
await response.read() # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
raise HttpResponseError(response=response)
response_headers = {}
- response_headers["ratelimit-limit"] = self._deserialize(
- "int", response.headers.get("ratelimit-limit")
- )
- response_headers["ratelimit-remaining"] = self._deserialize(
- "int", response.headers.get("ratelimit-remaining")
- )
- response_headers["ratelimit-reset"] = self._deserialize(
- "int", response.headers.get("ratelimit-reset")
- )
+ if response.status_code == 200:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
- if response.content:
- deserialized = response.json()
- else:
- deserialized = None
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if response.status_code == 404:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
if cls:
return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
@@ -172440,14 +182948,14 @@ async def get_options(self, **kwargs: Any) -> JSON:
return cast(JSON, deserialized) # type: ignore
-class ReservedIPsOperations:
+class ReservedIPv6Operations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~pydo.aio.GeneratedClient`'s
- :attr:`reserved_ips` attribute.
+ :attr:`reserved_ipv6` attribute.
"""
def __init__(self, *args, **kwargs) -> None:
@@ -172462,10 +182970,10 @@ def __init__(self, *args, **kwargs) -> None:
@distributed_trace_async
async def list(self, *, per_page: int = 20, page: int = 1, **kwargs: Any) -> JSON:
# pylint: disable=line-too-long
- """List All Reserved IPs.
+ """List All Reserved IPv6s.
- To list all of the reserved IPs available on your account, send a GET request to
- ``/v2/reserved_ips``.
+ To list all of the reserved IPv6s available on your account, send a GET request to
+ ``/v2/reserved_ipv6``.
:keyword per_page: Number of items returned per page. Default value is 20.
:paramtype per_page: int
@@ -172486,36 +182994,15 @@ async def list(self, *, per_page: int = 20, page: int = 1, **kwargs: Any) -> JSO
"links": {
"pages": {}
},
- "reserved_ips": [
+ "reserved_ipv6s": [
{
"droplet": {},
"ip": "str", # Optional. The public IP address of the
- reserved IP. It also serves as its identifier.
- "locked": bool, # Optional. A boolean value indicating
- whether or not the reserved IP has pending actions preventing new ones
- from being submitted.
- "project_id": "str", # Optional. The UUID of the project to
- which the reserved IP currently belongs.:code:`
`:code:`
`Requires
- ``project:read`` scope.
- "region": {
- "available": bool, # This is a boolean value that
- represents whether new Droplets can be created in this region.
- Required.
- "features": [
- "str" # This attribute is set to an array
- which contains features available in this region. Required.
- ],
- "name": "str", # The display name of the region.
- This will be a full name that is used in the control panel and other
- interfaces. Required.
- "sizes": [
- "str" # This attribute is set to an array
- which contains the identifying slugs for the sizes available in
- this region. sizes:read is required to view. Required.
- ],
- "slug": "str" # A human-readable string that is used
- as a unique identifier for each region. Required.
- }
+ reserved IPv6. It also serves as its identifier.
+ "region_slug": "str", # Optional. The region that the
+ reserved IPv6 is reserved to. When you query a reserved IPv6, the
+ region_slug will be returned.
+ "reserved_at": "2020-02-20 00:00:00" # Optional.
}
]
}
@@ -172538,7 +183025,7 @@ async def list(self, *, per_page: int = 20, page: int = 1, **kwargs: Any) -> JSO
cls: ClsType[JSON] = kwargs.pop("cls", None)
- _request = build_reserved_ips_list_request(
+ _request = build_reserved_ipv6_list_request(
per_page=per_page,
page=page,
headers=_headers,
@@ -172587,18 +183074,13 @@ async def create(
self, body: JSON, *, content_type: str = "application/json", **kwargs: Any
) -> JSON:
# pylint: disable=line-too-long
- """Create a New Reserved IP.
-
- On creation, a reserved IP must be either assigned to a Droplet or reserved to a region.
+ """Create a New Reserved IPv6.
+ On creation, a reserved IPv6 must be reserved to a region.
- *
- To create a new reserved IP assigned to a Droplet, send a POST
- request to ``/v2/reserved_ips`` with the ``droplet_id`` attribute.
- *
- To create a new reserved IP reserved to a region, send a POST request to
- ``/v2/reserved_ips`` with the ``region`` attribute.
+ * To create a new reserved IPv6 reserved to a region, send a POST request to
+ ``/v2/reserved_ipv6`` with the ``region_slug`` attribute.
:param body: Required.
:type body: JSON
@@ -172613,60 +183095,20 @@ async def create(
.. code-block:: python
# JSON input template you can fill out and use as your body input.
- body = {}
+ body = {
+ "region_slug": "str" # The slug identifier for the region the reserved IPv6
+ will be reserved to. Required.
+ }
- # response body for status code(s): 202
+ # response body for status code(s): 201
response == {
- "links": {
- "actions": [
- {
- "href": "str", # Optional. A URL that can be used to
- access the action.
- "id": 0, # Optional. A unique numeric ID that can be
- used to identify and reference an action.
- "rel": "str" # Optional. A string specifying the
- type of the related action.
- }
- ],
- "droplets": [
- {
- "href": "str", # Optional. A URL that can be used to
- access the action.
- "id": 0, # Optional. A unique numeric ID that can be
- used to identify and reference an action.
- "rel": "str" # Optional. A string specifying the
- type of the related action.
- }
- ]
- },
- "reserved_ip": {
- "droplet": {},
- "ip": "str", # Optional. The public IP address of the reserved IP.
+ "reserved_ipv6": {
+ "ip": "str", # Optional. The public IP address of the reserved IPv6.
It also serves as its identifier.
- "locked": bool, # Optional. A boolean value indicating whether or
- not the reserved IP has pending actions preventing new ones from being
- submitted.
- "project_id": "str", # Optional. The UUID of the project to which
- the reserved IP currently belongs.:code:`
`:code:`
`Requires
- ``project:read`` scope.
- "region": {
- "available": bool, # This is a boolean value that represents
- whether new Droplets can be created in this region. Required.
- "features": [
- "str" # This attribute is set to an array which
- contains features available in this region. Required.
- ],
- "name": "str", # The display name of the region. This will
- be a full name that is used in the control panel and other interfaces.
- Required.
- "sizes": [
- "str" # This attribute is set to an array which
- contains the identifying slugs for the sizes available in this
- region. sizes:read is required to view. Required.
- ],
- "slug": "str" # A human-readable string that is used as a
- unique identifier for each region. Required.
- }
+ "region_slug": "str", # Optional. The region that the reserved IPv6
+ is reserved to. When you query a reserved IPv6, the region_slug will be
+ returned.
+ "reserved_at": "2020-02-20 00:00:00" # Optional.
}
}
"""
@@ -172676,18 +183118,13 @@ async def create(
self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
) -> JSON:
# pylint: disable=line-too-long
- """Create a New Reserved IP.
-
- On creation, a reserved IP must be either assigned to a Droplet or reserved to a region.
+ """Create a New Reserved IPv6.
+ On creation, a reserved IPv6 must be reserved to a region.
- *
- To create a new reserved IP assigned to a Droplet, send a POST
- request to ``/v2/reserved_ips`` with the ``droplet_id`` attribute.
- *
- To create a new reserved IP reserved to a region, send a POST request to
- ``/v2/reserved_ips`` with the ``region`` attribute.
+ * To create a new reserved IPv6 reserved to a region, send a POST request to
+ ``/v2/reserved_ipv6`` with the ``region_slug`` attribute.
:param body: Required.
:type body: IO[bytes]
@@ -172701,58 +183138,15 @@ async def create(
Example:
.. code-block:: python
- # response body for status code(s): 202
+ # response body for status code(s): 201
response == {
- "links": {
- "actions": [
- {
- "href": "str", # Optional. A URL that can be used to
- access the action.
- "id": 0, # Optional. A unique numeric ID that can be
- used to identify and reference an action.
- "rel": "str" # Optional. A string specifying the
- type of the related action.
- }
- ],
- "droplets": [
- {
- "href": "str", # Optional. A URL that can be used to
- access the action.
- "id": 0, # Optional. A unique numeric ID that can be
- used to identify and reference an action.
- "rel": "str" # Optional. A string specifying the
- type of the related action.
- }
- ]
- },
- "reserved_ip": {
- "droplet": {},
- "ip": "str", # Optional. The public IP address of the reserved IP.
+ "reserved_ipv6": {
+ "ip": "str", # Optional. The public IP address of the reserved IPv6.
It also serves as its identifier.
- "locked": bool, # Optional. A boolean value indicating whether or
- not the reserved IP has pending actions preventing new ones from being
- submitted.
- "project_id": "str", # Optional. The UUID of the project to which
- the reserved IP currently belongs.:code:`
`:code:`
`Requires
- ``project:read`` scope.
- "region": {
- "available": bool, # This is a boolean value that represents
- whether new Droplets can be created in this region. Required.
- "features": [
- "str" # This attribute is set to an array which
- contains features available in this region. Required.
- ],
- "name": "str", # The display name of the region. This will
- be a full name that is used in the control panel and other interfaces.
- Required.
- "sizes": [
- "str" # This attribute is set to an array which
- contains the identifying slugs for the sizes available in this
- region. sizes:read is required to view. Required.
- ],
- "slug": "str" # A human-readable string that is used as a
- unique identifier for each region. Required.
- }
+ "region_slug": "str", # Optional. The region that the reserved IPv6
+ is reserved to. When you query a reserved IPv6, the region_slug will be
+ returned.
+ "reserved_at": "2020-02-20 00:00:00" # Optional.
}
}
"""
@@ -172760,18 +183154,13 @@ async def create(
@distributed_trace_async
async def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON:
# pylint: disable=line-too-long
- """Create a New Reserved IP.
-
- On creation, a reserved IP must be either assigned to a Droplet or reserved to a region.
+ """Create a New Reserved IPv6.
+ On creation, a reserved IPv6 must be reserved to a region.
- *
- To create a new reserved IP assigned to a Droplet, send a POST
- request to ``/v2/reserved_ips`` with the ``droplet_id`` attribute.
- *
- To create a new reserved IP reserved to a region, send a POST request to
- ``/v2/reserved_ips`` with the ``region`` attribute.
+ * To create a new reserved IPv6 reserved to a region, send a POST request to
+ ``/v2/reserved_ipv6`` with the ``region_slug`` attribute.
:param body: Is either a JSON type or a IO[bytes] type. Required.
:type body: JSON or IO[bytes]
@@ -172783,60 +183172,20 @@ async def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON:
.. code-block:: python
# JSON input template you can fill out and use as your body input.
- body = {}
+ body = {
+ "region_slug": "str" # The slug identifier for the region the reserved IPv6
+ will be reserved to. Required.
+ }
- # response body for status code(s): 202
+ # response body for status code(s): 201
response == {
- "links": {
- "actions": [
- {
- "href": "str", # Optional. A URL that can be used to
- access the action.
- "id": 0, # Optional. A unique numeric ID that can be
- used to identify and reference an action.
- "rel": "str" # Optional. A string specifying the
- type of the related action.
- }
- ],
- "droplets": [
- {
- "href": "str", # Optional. A URL that can be used to
- access the action.
- "id": 0, # Optional. A unique numeric ID that can be
- used to identify and reference an action.
- "rel": "str" # Optional. A string specifying the
- type of the related action.
- }
- ]
- },
- "reserved_ip": {
- "droplet": {},
- "ip": "str", # Optional. The public IP address of the reserved IP.
+ "reserved_ipv6": {
+ "ip": "str", # Optional. The public IP address of the reserved IPv6.
It also serves as its identifier.
- "locked": bool, # Optional. A boolean value indicating whether or
- not the reserved IP has pending actions preventing new ones from being
- submitted.
- "project_id": "str", # Optional. The UUID of the project to which
- the reserved IP currently belongs.:code:`
`:code:`
`Requires
- ``project:read`` scope.
- "region": {
- "available": bool, # This is a boolean value that represents
- whether new Droplets can be created in this region. Required.
- "features": [
- "str" # This attribute is set to an array which
- contains features available in this region. Required.
- ],
- "name": "str", # The display name of the region. This will
- be a full name that is used in the control panel and other interfaces.
- Required.
- "sizes": [
- "str" # This attribute is set to an array which
- contains the identifying slugs for the sizes available in this
- region. sizes:read is required to view. Required.
- ],
- "slug": "str" # A human-readable string that is used as a
- unique identifier for each region. Required.
- }
+ "region_slug": "str", # Optional. The region that the reserved IPv6
+ is reserved to. When you query a reserved IPv6, the region_slug will be
+ returned.
+ "reserved_at": "2020-02-20 00:00:00" # Optional.
}
}
"""
@@ -172869,7 +183218,7 @@ async def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON:
else:
_json = body
- _request = build_reserved_ips_create_request(
+ _request = build_reserved_ipv6_create_request(
content_type=content_type,
json=_json,
content=_content,
@@ -172887,7 +183236,7 @@ async def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON:
response = pipeline_response.http_response
- if response.status_code not in [202]:
+ if response.status_code not in [201]:
if _stream:
await response.read() # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
@@ -172915,15 +183264,15 @@ async def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON:
return cast(JSON, deserialized) # type: ignore
@distributed_trace_async
- async def get(self, reserved_ip: str, **kwargs: Any) -> JSON:
+ async def get(self, reserved_ipv6: str, **kwargs: Any) -> JSON:
# pylint: disable=line-too-long
- """Retrieve an Existing Reserved IP.
+ """Retrieve an Existing Reserved IPv6.
- To show information about a reserved IP, send a GET request to
- ``/v2/reserved_ips/$RESERVED_IP_ADDR``.
+ To show information about a reserved IPv6, send a GET request to
+ ``/v2/reserved_ipv6/$RESERVED_IPV6``.
- :param reserved_ip: A reserved IP address. Required.
- :type reserved_ip: str
+ :param reserved_ipv6: A reserved IPv6 address. Required.
+ :type reserved_ipv6: str
:return: JSON object
:rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
@@ -172933,34 +183282,15 @@ async def get(self, reserved_ip: str, **kwargs: Any) -> JSON:
# response body for status code(s): 200
response == {
- "reserved_ip": {
+ "reserved_ipv6": {
"droplet": {},
- "ip": "str", # Optional. The public IP address of the reserved IP.
+ "ip": "str", # Optional. The public IP address of the reserved IPv6.
It also serves as its identifier.
- "locked": bool, # Optional. A boolean value indicating whether or
- not the reserved IP has pending actions preventing new ones from being
- submitted.
- "project_id": "str", # Optional. The UUID of the project to which
- the reserved IP currently belongs.:code:`
`:code:`
`Requires
- ``project:read`` scope.
- "region": {
- "available": bool, # This is a boolean value that represents
- whether new Droplets can be created in this region. Required.
- "features": [
- "str" # This attribute is set to an array which
- contains features available in this region. Required.
- ],
- "name": "str", # The display name of the region. This will
- be a full name that is used in the control panel and other interfaces.
- Required.
- "sizes": [
- "str" # This attribute is set to an array which
- contains the identifying slugs for the sizes available in this
- region. sizes:read is required to view. Required.
- ],
- "slug": "str" # A human-readable string that is used as a
- unique identifier for each region. Required.
- }
+ "region_slug": "str", # Optional. The region that the reserved IPv6
+ is reserved to. When you query a reserved IPv6, the region_slug will be
+ returned.
+ "reserved_at": "2020-02-20 00:00:00" # Optional. The date and time
+ when the reserved IPv6 was reserved.
}
}
# response body for status code(s): 404
@@ -172993,8 +183323,8 @@ async def get(self, reserved_ip: str, **kwargs: Any) -> JSON:
cls: ClsType[JSON] = kwargs.pop("cls", None)
- _request = build_reserved_ips_get_request(
- reserved_ip=reserved_ip,
+ _request = build_reserved_ipv6_get_request(
+ reserved_ipv6=reserved_ipv6,
headers=_headers,
params=_params,
)
@@ -173054,18 +183384,18 @@ async def get(self, reserved_ip: str, **kwargs: Any) -> JSON:
return cast(JSON, deserialized) # type: ignore
@distributed_trace_async
- async def delete(self, reserved_ip: str, **kwargs: Any) -> Optional[JSON]:
+ async def delete(self, reserved_ipv6: str, **kwargs: Any) -> Optional[JSON]:
# pylint: disable=line-too-long
- """Delete a Reserved IP.
+ """Delete a Reserved IPv6.
To delete a reserved IP and remove it from your account, send a DELETE request
- to ``/v2/reserved_ips/$RESERVED_IP_ADDR``.
+ to ``/v2/reserved_ipv6/$RESERVED_IPV6``.
A successful request will receive a 204 status code with no body in response.
This indicates that the request was processed successfully.
- :param reserved_ip: A reserved IP address. Required.
- :type reserved_ip: str
+ :param reserved_ipv6: A reserved IPv6 address. Required.
+ :type reserved_ipv6: str
:return: JSON object or None
:rtype: JSON or None
:raises ~azure.core.exceptions.HttpResponseError:
@@ -173073,7 +183403,7 @@ async def delete(self, reserved_ip: str, **kwargs: Any) -> Optional[JSON]:
Example:
.. code-block:: python
- # response body for status code(s): 404
+ # response body for status code(s): 404, 422
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
returned. For example, the ID for a response returning a 404 status code would
@@ -173103,8 +183433,8 @@ async def delete(self, reserved_ip: str, **kwargs: Any) -> Optional[JSON]:
cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None)
- _request = build_reserved_ips_delete_request(
- reserved_ip=reserved_ip,
+ _request = build_reserved_ipv6_delete_request(
+ reserved_ipv6=reserved_ipv6,
headers=_headers,
params=_params,
)
@@ -173119,7 +183449,7 @@ async def delete(self, reserved_ip: str, **kwargs: Any) -> Optional[JSON]:
response = pipeline_response.http_response
- if response.status_code not in [204, 404]:
+ if response.status_code not in [204, 404, 422]:
if _stream:
await response.read() # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
@@ -173154,20 +183484,36 @@ async def delete(self, reserved_ip: str, **kwargs: Any) -> Optional[JSON]:
else:
deserialized = None
+ if response.status_code == 422:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
if cls:
return cls(pipeline_response, deserialized, response_headers) # type: ignore
return deserialized # type: ignore
-class ReservedIPsActionsOperations:
+class ReservedIPv6ActionsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~pydo.aio.GeneratedClient`'s
- :attr:`reserved_ips_actions` attribute.
+ :attr:`reserved_ipv6_actions` attribute.
"""
def __init__(self, *args, **kwargs) -> None:
@@ -173179,16 +183525,40 @@ def __init__(self, *args, **kwargs) -> None:
input_args.pop(0) if input_args else kwargs.pop("deserializer")
)
- @distributed_trace_async
- async def list(self, reserved_ip: str, **kwargs: Any) -> JSON:
+ @overload
+ async def post(
+ self,
+ reserved_ipv6: str,
+ body: Optional[JSON] = None,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> JSON:
# pylint: disable=line-too-long
- """List All Actions for a Reserved IP.
+ """Initiate a Reserved IPv6 Action.
- To retrieve all actions that have been executed on a reserved IP, send a GET request to
- ``/v2/reserved_ips/$RESERVED_IP/actions``.
+ To initiate an action on a reserved IPv6 send a POST request to
+ ``/v2/reserved_ipv6/$RESERVED_IPV6/actions``. In the JSON body to the request,
+ set the ``type`` attribute to one of the supported action types:
- :param reserved_ip: A reserved IP address. Required.
- :type reserved_ip: str
+ .. list-table::
+ :header-rows: 1
+
+ * - Action
+ - Details
+ * - ``assign``
+ - Assigns a reserved IPv6 to a Droplet
+ * - ``unassign``
+ - Unassign a reserved IPv6 from a Droplet.
+
+ :param reserved_ipv6: A reserved IPv6 address. Required.
+ :type reserved_ipv6: str
+ :param body: The ``type`` attribute set in the request body will specify the action that
+ will be taken on the reserved IPv6. Default value is None.
+ :type body: JSON
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
:return: JSON object
:rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
@@ -173196,57 +183566,50 @@ async def list(self, reserved_ip: str, **kwargs: Any) -> JSON:
Example:
.. code-block:: python
- # response body for status code(s): 200
+ # JSON input template you can fill out and use as your body input.
+ body = {}
+
+ # response body for status code(s): 201
response == {
- "meta": {
- "total": 0 # Optional. Number of objects returned by the request.
- },
- "actions": [
- {
- "completed_at": "2020-02-20 00:00:00", # Optional. A time
- value given in ISO8601 combined date and time format that represents when
- the action was completed.
- "id": 0, # Optional. A unique numeric ID that can be used to
- identify and reference an action.
- "region": {
- "available": bool, # This is a boolean value that
- represents whether new Droplets can be created in this region.
- Required.
- "features": [
- "str" # This attribute is set to an array
- which contains features available in this region. Required.
- ],
- "name": "str", # The display name of the region.
- This will be a full name that is used in the control panel and other
- interfaces. Required.
- "sizes": [
- "str" # This attribute is set to an array
- which contains the identifying slugs for the sizes available in
- this region. sizes:read is required to view. Required.
- ],
- "slug": "str" # A human-readable string that is used
- as a unique identifier for each region. Required.
- },
- "region_slug": "str", # Optional. A human-readable string
- that is used as a unique identifier for each region.
- "resource_id": 0, # Optional. A unique identifier for the
- resource that the action is associated with.
- "resource_type": "str", # Optional. The type of resource
- that the action is associated with.
- "started_at": "2020-02-20 00:00:00", # Optional. A time
- value given in ISO8601 combined date and time format that represents when
- the action was initiated.
- "status": "in-progress", # Optional. Default value is
- "in-progress". The current status of the action. This can be
- "in-progress", "completed", or "errored". Known values are:
- "in-progress", "completed", and "errored".
- "type": "str" # Optional. This is the type of action that
- the object represents. For example, this could be "transfer" to represent
- the state of an image transfer action.
- }
- ],
- "links": {
- "pages": {}
+ "action": {
+ "completed_at": "2020-02-20 00:00:00", # Optional. A time value
+ given in ISO8601 combined date and time format that represents when the
+ action was completed.
+ "id": 0, # Optional. A unique numeric ID that can be used to
+ identify and reference an action.
+ "region": {
+ "available": bool, # This is a boolean value that represents
+ whether new Droplets can be created in this region. Required.
+ "features": [
+ "str" # This attribute is set to an array which
+ contains features available in this region. Required.
+ ],
+ "name": "str", # The display name of the region. This will
+ be a full name that is used in the control panel and other interfaces.
+ Required.
+ "sizes": [
+ "str" # This attribute is set to an array which
+ contains the identifying slugs for the sizes available in this
+ region. sizes:read is required to view. Required.
+ ],
+ "slug": "str" # A human-readable string that is used as a
+ unique identifier for each region. Required.
+ },
+ "region_slug": "str", # Optional. A human-readable string that is
+ used as a unique identifier for each region.
+ "resource_id": 0, # Optional. A unique identifier for the resource
+ that the action is associated with.
+ "resource_type": "str", # Optional. The type of resource that the
+ action is associated with.
+ "started_at": "2020-02-20 00:00:00", # Optional. A time value given
+ in ISO8601 combined date and time format that represents when the action was
+ initiated.
+ "status": "in-progress", # Optional. Default value is "in-progress".
+ The current status of the action. This can be "in-progress", "completed", or
+ "errored". Known values are: "in-progress", "completed", and "errored".
+ "type": "str" # Optional. This is the type of action that the object
+ represents. For example, this could be "transfer" to represent the state of
+ an image transfer action.
}
}
# response body for status code(s): 404
@@ -173261,98 +183624,21 @@ async def list(self, reserved_ip: str, **kwargs: Any) -> JSON:
tickets to help identify the issue.
}
"""
- error_map: MutableMapping[int, Type[HttpResponseError]] = {
- 404: ResourceNotFoundError,
- 409: ResourceExistsError,
- 304: ResourceNotModifiedError,
- 401: cast(
- Type[HttpResponseError],
- lambda response: ClientAuthenticationError(response=response),
- ),
- 429: HttpResponseError,
- 500: HttpResponseError,
- }
- error_map.update(kwargs.pop("error_map", {}) or {})
-
- _headers = kwargs.pop("headers", {}) or {}
- _params = kwargs.pop("params", {}) or {}
-
- cls: ClsType[JSON] = kwargs.pop("cls", None)
-
- _request = build_reserved_ips_actions_list_request(
- reserved_ip=reserved_ip,
- headers=_headers,
- params=_params,
- )
- _request.url = self._client.format_url(_request.url)
-
- _stream = False
- pipeline_response: PipelineResponse = (
- await self._client._pipeline.run( # pylint: disable=protected-access
- _request, stream=_stream, **kwargs
- )
- )
-
- response = pipeline_response.http_response
-
- if response.status_code not in [200, 404]:
- if _stream:
- await response.read() # Load the body in memory and close the socket
- map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
- raise HttpResponseError(response=response)
-
- response_headers = {}
- if response.status_code == 200:
- response_headers["ratelimit-limit"] = self._deserialize(
- "int", response.headers.get("ratelimit-limit")
- )
- response_headers["ratelimit-remaining"] = self._deserialize(
- "int", response.headers.get("ratelimit-remaining")
- )
- response_headers["ratelimit-reset"] = self._deserialize(
- "int", response.headers.get("ratelimit-reset")
- )
-
- if response.content:
- deserialized = response.json()
- else:
- deserialized = None
-
- if response.status_code == 404:
- response_headers["ratelimit-limit"] = self._deserialize(
- "int", response.headers.get("ratelimit-limit")
- )
- response_headers["ratelimit-remaining"] = self._deserialize(
- "int", response.headers.get("ratelimit-remaining")
- )
- response_headers["ratelimit-reset"] = self._deserialize(
- "int", response.headers.get("ratelimit-reset")
- )
-
- if response.content:
- deserialized = response.json()
- else:
- deserialized = None
-
- if cls:
- return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
-
- return cast(JSON, deserialized) # type: ignore
@overload
async def post(
self,
- reserved_ip: str,
- body: Optional[JSON] = None,
+ reserved_ipv6: str,
+ body: Optional[IO[bytes]] = None,
*,
content_type: str = "application/json",
**kwargs: Any
) -> JSON:
# pylint: disable=line-too-long
- """Initiate a Reserved IP Action.
+ """Initiate a Reserved IPv6 Action.
- To initiate an action on a reserved IP send a POST request to
- ``/v2/reserved_ips/$RESERVED_IP/actions``. In the JSON body to the request,
+ To initiate an action on a reserved IPv6 send a POST request to
+ ``/v2/reserved_ipv6/$RESERVED_IPV6/actions``. In the JSON body to the request,
set the ``type`` attribute to on of the supported action types:
.. list-table::
@@ -173361,16 +183647,16 @@ async def post(
* - Action
- Details
* - ``assign``
- - Assigns a reserved IP to a Droplet
+ - Assigns a reserved IPv6 to a Droplet
* - ``unassign``
- - Unassign a reserved IP from a Droplet.
+ - Unassign a reserved IPv6 from a Droplet.
- :param reserved_ip: A reserved IP address. Required.
- :type reserved_ip: str
+ :param reserved_ipv6: A reserved IPv6 address. Required.
+ :type reserved_ipv6: str
:param body: The ``type`` attribute set in the request body will specify the action that
- will be taken on the reserved IP. Default value is None.
- :type body: JSON
- :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ will be taken on the reserved IPv6. Default value is None.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:return: JSON object
@@ -173380,9 +183666,6 @@ async def post(
Example:
.. code-block:: python
- # JSON input template you can fill out and use as your body input.
- body = {}
-
# response body for status code(s): 201
response == {
"action": {
@@ -173391,8 +183674,6 @@ async def post(
action was completed.
"id": 0, # Optional. A unique numeric ID that can be used to
identify and reference an action.
- "project_id": "str", # Optional. The UUID of the project to which
- the reserved IP currently belongs.
"region": {
"available": bool, # This is a boolean value that represents
whether new Droplets can be created in this region. Required.
@@ -173441,20 +183722,18 @@ async def post(
}
"""
- @overload
+ @distributed_trace_async
async def post(
self,
- reserved_ip: str,
- body: Optional[IO[bytes]] = None,
- *,
- content_type: str = "application/json",
+ reserved_ipv6: str,
+ body: Optional[Union[JSON, IO[bytes]]] = None,
**kwargs: Any
) -> JSON:
# pylint: disable=line-too-long
- """Initiate a Reserved IP Action.
+ """Initiate a Reserved IPv6 Action.
- To initiate an action on a reserved IP send a POST request to
- ``/v2/reserved_ips/$RESERVED_IP/actions``. In the JSON body to the request,
+ To initiate an action on a reserved IPv6 send a POST request to
+ ``/v2/reserved_ipv6/$RESERVED_IPV6/actions``. In the JSON body to the request,
set the ``type`` attribute to on of the supported action types:
.. list-table::
@@ -173463,18 +183742,16 @@ async def post(
* - Action
- Details
* - ``assign``
- - Assigns a reserved IP to a Droplet
+ - Assigns a reserved IPv6 to a Droplet
* - ``unassign``
- - Unassign a reserved IP from a Droplet.
+ - Unassign a reserved IPv6 from a Droplet.
- :param reserved_ip: A reserved IP address. Required.
- :type reserved_ip: str
+ :param reserved_ipv6: A reserved IPv6 address. Required.
+ :type reserved_ipv6: str
:param body: The ``type`` attribute set in the request body will specify the action that
- will be taken on the reserved IP. Default value is None.
- :type body: IO[bytes]
- :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
- Default value is "application/json".
- :paramtype content_type: str
+ will be taken on the reserved IPv6. Is either a JSON type or a IO[bytes] type. Default value
+ is None.
+ :type body: JSON or IO[bytes]
:return: JSON object
:rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
@@ -173482,6 +183759,9 @@ async def post(
Example:
.. code-block:: python
+ # JSON input template you can fill out and use as your body input.
+ body = {}
+
# response body for status code(s): 201
response == {
"action": {
@@ -173490,8 +183770,6 @@ async def post(
action was completed.
"id": 0, # Optional. A unique numeric ID that can be used to
identify and reference an action.
- "project_id": "str", # Optional. The UUID of the project to which
- the reserved IP currently belongs.
"region": {
"available": bool, # This is a boolean value that represents
whether new Droplets can be created in this region. Required.
@@ -173527,7 +183805,215 @@ async def post(
an image transfer action.
}
}
- # response body for status code(s): 404
+ # response body for status code(s): 404
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
+ """
+ error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ 401: cast(
+ Type[HttpResponseError],
+ lambda response: ClientAuthenticationError(response=response),
+ ),
+ 429: HttpResponseError,
+ 500: HttpResponseError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = kwargs.pop("params", {}) or {}
+
+ content_type: Optional[str] = kwargs.pop(
+ "content_type", _headers.pop("Content-Type", None)
+ )
+ cls: ClsType[JSON] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ if body is not None:
+ _json = body
+ else:
+ _json = None
+
+ _request = build_reserved_ipv6_actions_post_request(
+ reserved_ipv6=reserved_ipv6,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = (
+ await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [201, 404]:
+ if _stream:
+ await response.read() # Load the body in memory and close the socket
+ map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
+ raise HttpResponseError(response=response)
+
+ response_headers = {}
+ if response.status_code == 201:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if response.status_code == 404:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if cls:
+ return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
+
+ return cast(JSON, deserialized) # type: ignore
+
+
+class ByoipPrefixesOperations:
+ """
+ .. warning::
+ **DO NOT** instantiate this class directly.
+
+ Instead, you should access the following operations through
+ :class:`~pydo.aio.GeneratedClient`'s
+ :attr:`byoip_prefixes` attribute.
+ """
+
+ def __init__(self, *args, **kwargs) -> None:
+ input_args = list(args)
+ self._client = input_args.pop(0) if input_args else kwargs.pop("client")
+ self._config = input_args.pop(0) if input_args else kwargs.pop("config")
+ self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
+ self._deserialize = (
+ input_args.pop(0) if input_args else kwargs.pop("deserializer")
+ )
+
+ @overload
+ async def create(
+ self, body: JSON, *, content_type: str = "application/json", **kwargs: Any
+ ) -> JSON:
+ # pylint: disable=line-too-long
+ """Create a BYOIP Prefix.
+
+ To create a BYOIP prefix, send a POST request to ``/v2/byoip_prefixes``.
+
+ A successful request will initiate the process of bringing your BYOIP Prefix into your account.
+ The response will include the details of the created prefix, including its UUID and status.
+
+ :param body: Required.
+ :type body: JSON
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: JSON object
+ :rtype: JSON
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # JSON input template you can fill out and use as your body input.
+ body = {
+ "prefix": "str", # The IP prefix in CIDR notation to bring. Required.
+ "region": "str", # The region where the prefix will be created. Required.
+ "signature": "str" # The signature hash for the prefix creation request.
+ Required.
+ }
+
+ # response body for status code(s): 202
+ response == {
+ "region": "str", # Optional. The region where the prefix is created.
+ "status": "str", # Optional. The status of the BYOIP prefix.
+ "uuid": "str" # Optional. The unique identifier for the BYOIP prefix.
+ }
+ # response body for status code(s): 422
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
+ """
+
+ @overload
+ async def create(
+ self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
+ ) -> JSON:
+ # pylint: disable=line-too-long
+ """Create a BYOIP Prefix.
+
+ To create a BYOIP prefix, send a POST request to ``/v2/byoip_prefixes``.
+
+ A successful request will initiate the process of bringing your BYOIP Prefix into your account.
+ The response will include the details of the created prefix, including its UUID and status.
+
+ :param body: Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: JSON object
+ :rtype: JSON
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # response body for status code(s): 202
+ response == {
+ "region": "str", # Optional. The region where the prefix is created.
+ "status": "str", # Optional. The status of the BYOIP prefix.
+ "uuid": "str" # Optional. The unique identifier for the BYOIP prefix.
+ }
+ # response body for status code(s): 422
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
returned. For example, the ID for a response returning a 404 status code would
@@ -173541,34 +184027,16 @@ async def post(
"""
@distributed_trace_async
- async def post(
- self,
- reserved_ip: str,
- body: Optional[Union[JSON, IO[bytes]]] = None,
- **kwargs: Any
- ) -> JSON:
+ async def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON:
# pylint: disable=line-too-long
- """Initiate a Reserved IP Action.
-
- To initiate an action on a reserved IP send a POST request to
- ``/v2/reserved_ips/$RESERVED_IP/actions``. In the JSON body to the request,
- set the ``type`` attribute to on of the supported action types:
+ """Create a BYOIP Prefix.
- .. list-table::
- :header-rows: 1
+ To create a BYOIP prefix, send a POST request to ``/v2/byoip_prefixes``.
- * - Action
- - Details
- * - ``assign``
- - Assigns a reserved IP to a Droplet
- * - ``unassign``
- - Unassign a reserved IP from a Droplet.
+ A successful request will initiate the process of bringing your BYOIP Prefix into your account.
+ The response will include the details of the created prefix, including its UUID and status.
- :param reserved_ip: A reserved IP address. Required.
- :type reserved_ip: str
- :param body: The ``type`` attribute set in the request body will specify the action that
- will be taken on the reserved IP. Is either a JSON type or a IO[bytes] type. Default value is
- None.
+ :param body: Is either a JSON type or a IO[bytes] type. Required.
:type body: JSON or IO[bytes]
:return: JSON object
:rtype: JSON
@@ -173578,54 +184046,20 @@ async def post(
.. code-block:: python
# JSON input template you can fill out and use as your body input.
- body = {}
+ body = {
+ "prefix": "str", # The IP prefix in CIDR notation to bring. Required.
+ "region": "str", # The region where the prefix will be created. Required.
+ "signature": "str" # The signature hash for the prefix creation request.
+ Required.
+ }
- # response body for status code(s): 201
+ # response body for status code(s): 202
response == {
- "action": {
- "completed_at": "2020-02-20 00:00:00", # Optional. A time value
- given in ISO8601 combined date and time format that represents when the
- action was completed.
- "id": 0, # Optional. A unique numeric ID that can be used to
- identify and reference an action.
- "project_id": "str", # Optional. The UUID of the project to which
- the reserved IP currently belongs.
- "region": {
- "available": bool, # This is a boolean value that represents
- whether new Droplets can be created in this region. Required.
- "features": [
- "str" # This attribute is set to an array which
- contains features available in this region. Required.
- ],
- "name": "str", # The display name of the region. This will
- be a full name that is used in the control panel and other interfaces.
- Required.
- "sizes": [
- "str" # This attribute is set to an array which
- contains the identifying slugs for the sizes available in this
- region. sizes:read is required to view. Required.
- ],
- "slug": "str" # A human-readable string that is used as a
- unique identifier for each region. Required.
- },
- "region_slug": "str", # Optional. A human-readable string that is
- used as a unique identifier for each region.
- "resource_id": 0, # Optional. A unique identifier for the resource
- that the action is associated with.
- "resource_type": "str", # Optional. The type of resource that the
- action is associated with.
- "started_at": "2020-02-20 00:00:00", # Optional. A time value given
- in ISO8601 combined date and time format that represents when the action was
- initiated.
- "status": "in-progress", # Optional. Default value is "in-progress".
- The current status of the action. This can be "in-progress", "completed", or
- "errored". Known values are: "in-progress", "completed", and "errored".
- "type": "str" # Optional. This is the type of action that the object
- represents. For example, this could be "transfer" to represent the state of
- an image transfer action.
- }
+ "region": "str", # Optional. The region where the prefix is created.
+ "status": "str", # Optional. The status of the BYOIP prefix.
+ "uuid": "str" # Optional. The unique identifier for the BYOIP prefix.
}
- # response body for status code(s): 404
+ # response body for status code(s): 422
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
returned. For example, the ID for a response returning a 404 status code would
@@ -173664,13 +184098,9 @@ async def post(
if isinstance(body, (IOBase, bytes)):
_content = body
else:
- if body is not None:
- _json = body
- else:
- _json = None
+ _json = body
- _request = build_reserved_ips_actions_post_request(
- reserved_ip=reserved_ip,
+ _request = build_byoip_prefixes_create_request(
content_type=content_type,
json=_json,
content=_content,
@@ -173688,14 +184118,14 @@ async def post(
response = pipeline_response.http_response
- if response.status_code not in [201, 404]:
+ if response.status_code not in [202, 422]:
if _stream:
await response.read() # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
raise HttpResponseError(response=response)
response_headers = {}
- if response.status_code == 201:
+ if response.status_code == 202:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
)
@@ -173711,7 +184141,7 @@ async def post(
else:
deserialized = None
- if response.status_code == 404:
+ if response.status_code == 422:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
)
@@ -173733,18 +184163,135 @@ async def post(
return cast(JSON, deserialized) # type: ignore
@distributed_trace_async
- async def get(self, reserved_ip: str, action_id: int, **kwargs: Any) -> JSON:
+ async def list(self, *, per_page: int = 20, page: int = 1, **kwargs: Any) -> JSON:
+ """List BYOIP Prefixes.
+
+ To list all BYOIP prefixes, send a GET request to ``/v2/byoip_prefixes``.
+ A successful response will return a list of all BYOIP prefixes associated with the account.
+
+ :keyword per_page: Number of items returned per page. Default value is 20.
+ :paramtype per_page: int
+ :keyword page: Which 'page' of paginated results to return. Default value is 1.
+ :paramtype page: int
+ :return: JSON object
+ :rtype: JSON
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # response body for status code(s): 200
+ response == {
+ "meta": {
+ "total": 0 # Optional. Number of objects returned by the request.
+ },
+ "byoip_prefixes": [
+ {
+ "advertised": bool, # Optional. Whether the BYOIP prefix is
+ being advertised.
+ "failure_reason": "str", # Optional. Reason for failure, if
+ applicable.
+ "locked": bool, # Optional. Whether the BYOIP prefix is
+ locked.
+ "name": "str", # Optional. Name of the BYOIP prefix.
+ "prefix": "str", # Optional. The IP prefix in CIDR notation.
+ "project_id": "str", # Optional. The ID of the project
+ associated with the BYOIP prefix.
+ "region": "str", # Optional. Region where the BYOIP prefix
+ is located.
+ "status": "str", # Optional. Status of the BYOIP prefix.
+ "uuid": "str", # Optional. Unique identifier for the BYOIP
+ prefix.
+ "validations": [
+ {
+ "name": "str", # Optional. Name of the
+ validation.
+ "note": "str", # Optional. Additional notes
+ or details about the validation.
+ "status": "str" # Optional. Status of the
+ validation.
+ }
+ ]
+ }
+ ],
+ "links": {
+ "pages": {}
+ }
+ }
+ """
+ error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ 401: cast(
+ Type[HttpResponseError],
+ lambda response: ClientAuthenticationError(response=response),
+ ),
+ 429: HttpResponseError,
+ 500: HttpResponseError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = kwargs.pop("params", {}) or {}
+
+ cls: ClsType[JSON] = kwargs.pop("cls", None)
+
+ _request = build_byoip_prefixes_list_request(
+ per_page=per_page,
+ page=page,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = (
+ await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ if _stream:
+ await response.read() # Load the body in memory and close the socket
+ map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
+ raise HttpResponseError(response=response)
+
+ response_headers = {}
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if cls:
+ return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
+
+ return cast(JSON, deserialized) # type: ignore
+
+ @distributed_trace_async
+ async def get(self, byoip_prefix_uuid: str, **kwargs: Any) -> JSON:
# pylint: disable=line-too-long
- """Retrieve an Existing Reserved IP Action.
+ """Get a BYOIP Prefix.
- To retrieve the status of a reserved IP action, send a GET request to
- ``/v2/reserved_ips/$RESERVED_IP/actions/$ACTION_ID``.
+ To get a BYOIP prefix, send a GET request to ``/v2/byoip_prefixes/$byoip_prefix_uuid``.
- :param reserved_ip: A reserved IP address. Required.
- :type reserved_ip: str
- :param action_id: A unique numeric ID that can be used to identify and reference an action.
- Required.
- :type action_id: int
+ A successful response will return the details of the specified BYOIP prefix.
+
+ :param byoip_prefix_uuid: The unique identifier for the BYOIP Prefix. Required.
+ :type byoip_prefix_uuid: str
:return: JSON object
:rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
@@ -173754,50 +184301,32 @@ async def get(self, reserved_ip: str, action_id: int, **kwargs: Any) -> JSON:
# response body for status code(s): 200
response == {
- "action": {
- "completed_at": "2020-02-20 00:00:00", # Optional. A time value
- given in ISO8601 combined date and time format that represents when the
- action was completed.
- "id": 0, # Optional. A unique numeric ID that can be used to
- identify and reference an action.
- "project_id": "str", # Optional. The UUID of the project to which
- the reserved IP currently belongs.
- "region": {
- "available": bool, # This is a boolean value that represents
- whether new Droplets can be created in this region. Required.
- "features": [
- "str" # This attribute is set to an array which
- contains features available in this region. Required.
- ],
- "name": "str", # The display name of the region. This will
- be a full name that is used in the control panel and other interfaces.
- Required.
- "sizes": [
- "str" # This attribute is set to an array which
- contains the identifying slugs for the sizes available in this
- region. sizes:read is required to view. Required.
- ],
- "slug": "str" # A human-readable string that is used as a
- unique identifier for each region. Required.
- },
- "region_slug": "str", # Optional. A human-readable string that is
- used as a unique identifier for each region.
- "resource_id": 0, # Optional. A unique identifier for the resource
- that the action is associated with.
- "resource_type": "str", # Optional. The type of resource that the
- action is associated with.
- "started_at": "2020-02-20 00:00:00", # Optional. A time value given
- in ISO8601 combined date and time format that represents when the action was
- initiated.
- "status": "in-progress", # Optional. Default value is "in-progress".
- The current status of the action. This can be "in-progress", "completed", or
- "errored". Known values are: "in-progress", "completed", and "errored".
- "type": "str" # Optional. This is the type of action that the object
- represents. For example, this could be "transfer" to represent the state of
- an image transfer action.
+ "byoip_prefix": {
+ "advertised": bool, # Optional. Whether the BYOIP prefix is being
+ advertised.
+ "failure_reason": "str", # Optional. Reason for failure, if
+ applicable.
+ "locked": bool, # Optional. Whether the BYOIP prefix is locked.
+ "name": "str", # Optional. Name of the BYOIP prefix.
+ "prefix": "str", # Optional. The IP prefix in CIDR notation.
+ "project_id": "str", # Optional. The ID of the project associated
+ with the BYOIP prefix.
+ "region": "str", # Optional. Region where the BYOIP prefix is
+ located.
+ "status": "str", # Optional. Status of the BYOIP prefix.
+ "uuid": "str", # Optional. Unique identifier for the BYOIP prefix.
+ "validations": [
+ {
+ "name": "str", # Optional. Name of the validation.
+ "note": "str", # Optional. Additional notes or
+ details about the validation.
+ "status": "str" # Optional. Status of the
+ validation.
+ }
+ ]
}
}
- # response body for status code(s): 404
+ # response body for status code(s): 404, 422
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
returned. For example, the ID for a response returning a 404 status code would
@@ -173827,9 +184356,8 @@ async def get(self, reserved_ip: str, action_id: int, **kwargs: Any) -> JSON:
cls: ClsType[JSON] = kwargs.pop("cls", None)
- _request = build_reserved_ips_actions_get_request(
- reserved_ip=reserved_ip,
- action_id=action_id,
+ _request = build_byoip_prefixes_get_request(
+ byoip_prefix_uuid=byoip_prefix_uuid,
headers=_headers,
params=_params,
)
@@ -173844,7 +184372,7 @@ async def get(self, reserved_ip: str, action_id: int, **kwargs: Any) -> JSON:
response = pipeline_response.http_response
- if response.status_code not in [200, 404]:
+ if response.status_code not in [200, 404, 422]:
if _stream:
await response.read() # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
@@ -173883,69 +184411,57 @@ async def get(self, reserved_ip: str, action_id: int, **kwargs: Any) -> JSON:
else:
deserialized = None
+ if response.status_code == 422:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
if cls:
return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
return cast(JSON, deserialized) # type: ignore
-
-class ReservedIPv6Operations:
- """
- .. warning::
- **DO NOT** instantiate this class directly.
-
- Instead, you should access the following operations through
- :class:`~pydo.aio.GeneratedClient`'s
- :attr:`reserved_ipv6` attribute.
- """
-
- def __init__(self, *args, **kwargs) -> None:
- input_args = list(args)
- self._client = input_args.pop(0) if input_args else kwargs.pop("client")
- self._config = input_args.pop(0) if input_args else kwargs.pop("config")
- self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
- self._deserialize = (
- input_args.pop(0) if input_args else kwargs.pop("deserializer")
- )
-
@distributed_trace_async
- async def list(self, *, per_page: int = 20, page: int = 1, **kwargs: Any) -> JSON:
+ async def delete(self, byoip_prefix_uuid: str, **kwargs: Any) -> Optional[JSON]:
# pylint: disable=line-too-long
- """List All Reserved IPv6s.
+ """Delete a BYOIP Prefix.
- To list all of the reserved IPv6s available on your account, send a GET request to
- ``/v2/reserved_ipv6``.
+ To delete a BYOIP prefix and remove it from your account, send a DELETE request
+ to ``/v2/byoip_prefixes/$byoip_prefix_uuid``.
- :keyword per_page: Number of items returned per page. Default value is 20.
- :paramtype per_page: int
- :keyword page: Which 'page' of paginated results to return. Default value is 1.
- :paramtype page: int
- :return: JSON object
- :rtype: JSON
+ A successful request will receive a 202 status code with no body in response.
+ This indicates that the request was accepted and the prefix is being deleted.
+
+ :param byoip_prefix_uuid: The unique identifier for the BYOIP Prefix. Required.
+ :type byoip_prefix_uuid: str
+ :return: JSON object or None
+ :rtype: JSON or None
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
- # response body for status code(s): 200
+ # response body for status code(s): 404, 422
response == {
- "meta": {
- "total": 0 # Optional. Number of objects returned by the request.
- },
- "links": {
- "pages": {}
- },
- "reserved_ipv6s": [
- {
- "droplet": {},
- "ip": "str", # Optional. The public IP address of the
- reserved IPv6. It also serves as its identifier.
- "region_slug": "str", # Optional. The region that the
- reserved IPv6 is reserved to. When you query a reserved IPv6,the
- region_slug will be returned.
- "reserved_at": "2020-02-20 00:00:00" # Optional.
- }
- ]
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
}
"""
error_map: MutableMapping[int, Type[HttpResponseError]] = {
@@ -173964,11 +184480,10 @@ async def list(self, *, per_page: int = 20, page: int = 1, **kwargs: Any) -> JSO
_headers = kwargs.pop("headers", {}) or {}
_params = kwargs.pop("params", {}) or {}
- cls: ClsType[JSON] = kwargs.pop("cls", None)
+ cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None)
- _request = build_reserved_ipv6_list_request(
- per_page=per_page,
- page=page,
+ _request = build_byoip_prefixes_delete_request(
+ byoip_prefix_uuid=byoip_prefix_uuid,
headers=_headers,
params=_params,
)
@@ -173983,46 +184498,81 @@ async def list(self, *, per_page: int = 20, page: int = 1, **kwargs: Any) -> JSO
response = pipeline_response.http_response
- if response.status_code not in [200]:
+ if response.status_code not in [202, 404, 422]:
if _stream:
await response.read() # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
raise HttpResponseError(response=response)
+ deserialized = None
response_headers = {}
- response_headers["ratelimit-limit"] = self._deserialize(
- "int", response.headers.get("ratelimit-limit")
- )
- response_headers["ratelimit-remaining"] = self._deserialize(
- "int", response.headers.get("ratelimit-remaining")
- )
- response_headers["ratelimit-reset"] = self._deserialize(
- "int", response.headers.get("ratelimit-reset")
- )
+ if response.status_code == 202:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
- if response.content:
- deserialized = response.json()
- else:
- deserialized = None
+ if response.status_code == 404:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if response.status_code == 422:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
if cls:
- return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- return cast(JSON, deserialized) # type: ignore
+ return deserialized # type: ignore
@overload
- async def create(
- self, body: JSON, *, content_type: str = "application/json", **kwargs: Any
+ async def patch(
+ self,
+ byoip_prefix_uuid: str,
+ body: JSON,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
) -> JSON:
# pylint: disable=line-too-long
- """Create a New Reserved IPv6.
-
- On creation, a reserved IPv6 must be reserved to a region.
+ """Update a BYOIP Prefix.
+ To update a BYOIP prefix, send a PATCH request to ``/v2/byoip_prefixes/$byoip_prefix_uuid``.
- * To create a new reserved IPv6 reserved to a region, send a POST request to
- ``/v2/reserved_ipv6`` with the ``region_slug`` attribute.
+ Currently, you can update the advertisement status of the prefix.
+ The response will include the updated details of the prefix.
+ :param byoip_prefix_uuid: A unique identifier for a BYOIP prefix. Required.
+ :type byoip_prefix_uuid: str
:param body: Required.
:type body: JSON
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
@@ -174037,36 +184587,68 @@ async def create(
# JSON input template you can fill out and use as your body input.
body = {
- "region_slug": "str" # The slug identifier for the region the reserved IPv6
- will be reserved to. Required.
+ "advertise": bool # Optional. Whether the BYOIP prefix should be advertised.
}
- # response body for status code(s): 201
+ # response body for status code(s): 202
response == {
- "reserved_ipv6": {
- "ip": "str", # Optional. The public IP address of the reserved IPv6.
- It also serves as its identifier.
- "region_slug": "str", # Optional. The region that the reserved IPv6
- is reserved to. When you query a reserved IPv6,the region_slug will be
- returned.
- "reserved_at": "2020-02-20 00:00:00" # Optional.
+ "byoip_prefix": {
+ "advertised": bool, # Optional. Whether the BYOIP prefix is being
+ advertised.
+ "failure_reason": "str", # Optional. Reason for failure, if
+ applicable.
+ "locked": bool, # Optional. Whether the BYOIP prefix is locked.
+ "name": "str", # Optional. Name of the BYOIP prefix.
+ "prefix": "str", # Optional. The IP prefix in CIDR notation.
+ "project_id": "str", # Optional. The ID of the project associated
+ with the BYOIP prefix.
+ "region": "str", # Optional. Region where the BYOIP prefix is
+ located.
+ "status": "str", # Optional. Status of the BYOIP prefix.
+ "uuid": "str", # Optional. Unique identifier for the BYOIP prefix.
+ "validations": [
+ {
+ "name": "str", # Optional. Name of the validation.
+ "note": "str", # Optional. Additional notes or
+ details about the validation.
+ "status": "str" # Optional. Status of the
+ validation.
+ }
+ ]
}
}
+ # response body for status code(s): 404, 422
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
"""
@overload
- async def create(
- self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
+ async def patch(
+ self,
+ byoip_prefix_uuid: str,
+ body: IO[bytes],
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
) -> JSON:
# pylint: disable=line-too-long
- """Create a New Reserved IPv6.
-
- On creation, a reserved IPv6 must be reserved to a region.
+ """Update a BYOIP Prefix.
+ To update a BYOIP prefix, send a PATCH request to ``/v2/byoip_prefixes/$byoip_prefix_uuid``.
- * To create a new reserved IPv6 reserved to a region, send a POST request to
- ``/v2/reserved_ipv6`` with the ``region_slug`` attribute.
+ Currently, you can update the advertisement status of the prefix.
+ The response will include the updated details of the prefix.
+ :param byoip_prefix_uuid: A unique identifier for a BYOIP prefix. Required.
+ :type byoip_prefix_uuid: str
:param body: Required.
:type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
@@ -174079,30 +184661,60 @@ async def create(
Example:
.. code-block:: python
- # response body for status code(s): 201
+ # response body for status code(s): 202
response == {
- "reserved_ipv6": {
- "ip": "str", # Optional. The public IP address of the reserved IPv6.
- It also serves as its identifier.
- "region_slug": "str", # Optional. The region that the reserved IPv6
- is reserved to. When you query a reserved IPv6,the region_slug will be
- returned.
- "reserved_at": "2020-02-20 00:00:00" # Optional.
+ "byoip_prefix": {
+ "advertised": bool, # Optional. Whether the BYOIP prefix is being
+ advertised.
+ "failure_reason": "str", # Optional. Reason for failure, if
+ applicable.
+ "locked": bool, # Optional. Whether the BYOIP prefix is locked.
+ "name": "str", # Optional. Name of the BYOIP prefix.
+ "prefix": "str", # Optional. The IP prefix in CIDR notation.
+ "project_id": "str", # Optional. The ID of the project associated
+ with the BYOIP prefix.
+ "region": "str", # Optional. Region where the BYOIP prefix is
+ located.
+ "status": "str", # Optional. Status of the BYOIP prefix.
+ "uuid": "str", # Optional. Unique identifier for the BYOIP prefix.
+ "validations": [
+ {
+ "name": "str", # Optional. Name of the validation.
+ "note": "str", # Optional. Additional notes or
+ details about the validation.
+ "status": "str" # Optional. Status of the
+ validation.
+ }
+ ]
}
}
+ # response body for status code(s): 404, 422
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
"""
@distributed_trace_async
- async def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON:
+ async def patch(
+ self, byoip_prefix_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any
+ ) -> JSON:
# pylint: disable=line-too-long
- """Create a New Reserved IPv6.
-
- On creation, a reserved IPv6 must be reserved to a region.
+ """Update a BYOIP Prefix.
+ To update a BYOIP prefix, send a PATCH request to ``/v2/byoip_prefixes/$byoip_prefix_uuid``.
- * To create a new reserved IPv6 reserved to a region, send a POST request to
- ``/v2/reserved_ipv6`` with the ``region_slug`` attribute.
+ Currently, you can update the advertisement status of the prefix.
+ The response will include the updated details of the prefix.
+ :param byoip_prefix_uuid: A unique identifier for a BYOIP prefix. Required.
+ :type byoip_prefix_uuid: str
:param body: Is either a JSON type or a IO[bytes] type. Required.
:type body: JSON or IO[bytes]
:return: JSON object
@@ -174114,21 +184726,47 @@ async def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON:
# JSON input template you can fill out and use as your body input.
body = {
- "region_slug": "str" # The slug identifier for the region the reserved IPv6
- will be reserved to. Required.
+ "advertise": bool # Optional. Whether the BYOIP prefix should be advertised.
}
- # response body for status code(s): 201
+ # response body for status code(s): 202
response == {
- "reserved_ipv6": {
- "ip": "str", # Optional. The public IP address of the reserved IPv6.
- It also serves as its identifier.
- "region_slug": "str", # Optional. The region that the reserved IPv6
- is reserved to. When you query a reserved IPv6,the region_slug will be
- returned.
- "reserved_at": "2020-02-20 00:00:00" # Optional.
+ "byoip_prefix": {
+ "advertised": bool, # Optional. Whether the BYOIP prefix is being
+ advertised.
+ "failure_reason": "str", # Optional. Reason for failure, if
+ applicable.
+ "locked": bool, # Optional. Whether the BYOIP prefix is locked.
+ "name": "str", # Optional. Name of the BYOIP prefix.
+ "prefix": "str", # Optional. The IP prefix in CIDR notation.
+ "project_id": "str", # Optional. The ID of the project associated
+ with the BYOIP prefix.
+ "region": "str", # Optional. Region where the BYOIP prefix is
+ located.
+ "status": "str", # Optional. Status of the BYOIP prefix.
+ "uuid": "str", # Optional. Unique identifier for the BYOIP prefix.
+ "validations": [
+ {
+ "name": "str", # Optional. Name of the validation.
+ "note": "str", # Optional. Additional notes or
+ details about the validation.
+ "status": "str" # Optional. Status of the
+ validation.
+ }
+ ]
}
}
+ # response body for status code(s): 404, 422
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
"""
error_map: MutableMapping[int, Type[HttpResponseError]] = {
404: ResourceNotFoundError,
@@ -174159,7 +184797,8 @@ async def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON:
else:
_json = body
- _request = build_reserved_ipv6_create_request(
+ _request = build_byoip_prefixes_patch_request(
+ byoip_prefix_uuid=byoip_prefix_uuid,
content_type=content_type,
json=_json,
content=_content,
@@ -174177,27 +184816,60 @@ async def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON:
response = pipeline_response.http_response
- if response.status_code not in [201]:
+ if response.status_code not in [202, 404, 422]:
if _stream:
await response.read() # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
raise HttpResponseError(response=response)
response_headers = {}
- response_headers["ratelimit-limit"] = self._deserialize(
- "int", response.headers.get("ratelimit-limit")
- )
- response_headers["ratelimit-remaining"] = self._deserialize(
- "int", response.headers.get("ratelimit-remaining")
- )
- response_headers["ratelimit-reset"] = self._deserialize(
- "int", response.headers.get("ratelimit-reset")
- )
+ if response.status_code == 202:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
- if response.content:
- deserialized = response.json()
- else:
- deserialized = None
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if response.status_code == 404:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if response.status_code == 422:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
if cls:
return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
@@ -174205,15 +184877,29 @@ async def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON:
return cast(JSON, deserialized) # type: ignore
@distributed_trace_async
- async def get(self, reserved_ipv6: str, **kwargs: Any) -> JSON:
+ async def list_resources(
+ self,
+ byoip_prefix_uuid: str,
+ *,
+ per_page: int = 20,
+ page: int = 1,
+ **kwargs: Any
+ ) -> JSON:
# pylint: disable=line-too-long
- """Retrieve an Existing Reserved IPv6.
+ """List BYOIP Prefix Resources.
- To show information about a reserved IPv6, send a GET request to
- ``/v2/reserved_ipv6/$RESERVED_IPV6``.
+ To list resources associated with BYOIP prefixes, send a GET request to
+ ``/v2/byoip_prefixes/{byoip_prefix_uuid}/ips``.
- :param reserved_ipv6: A reserved IPv6 address. Required.
- :type reserved_ipv6: str
+ A successful response will return a list of resources associated with the specified BYOIP
+ prefix.
+
+ :param byoip_prefix_uuid: The unique identifier for the BYOIP Prefix. Required.
+ :type byoip_prefix_uuid: str
+ :keyword per_page: Number of items returned per page. Default value is 20.
+ :paramtype per_page: int
+ :keyword page: Which 'page' of paginated results to return. Default value is 1.
+ :paramtype page: int
:return: JSON object
:rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
@@ -174223,15 +184909,23 @@ async def get(self, reserved_ipv6: str, **kwargs: Any) -> JSON:
# response body for status code(s): 200
response == {
- "reserved_ipv6": {
- "droplet": {},
- "ip": "str", # Optional. The public IP address of the reserved IPv6.
- It also serves as its identifier.
- "region_slug": "str", # Optional. The region that the reserved IPv6
- is reserved to. When you query a reserved IPv6,the region_slug will be
- returned.
- "reserved_at": "2020-02-20 00:00:00" # Optional. The date and time
- when the reserved IPv6 was reserved.
+ "meta": {
+ "total": 0 # Optional. Number of objects returned by the request.
+ },
+ "ips": [
+ {
+ "assigned_at": "2020-02-20 00:00:00", # Optional. Time when
+ the allocation was assigned.
+ "byoip": "str", # Optional. The BYOIP prefix UUID.
+ "id": 0, # Optional. Unique identifier for the allocation.
+ "region": "str", # Optional. Region where the allocation is
+ made.
+ "resource": "str" # Optional. The resource associated with
+ the allocation.
+ }
+ ],
+ "links": {
+ "pages": {}
}
}
# response body for status code(s): 404
@@ -174264,8 +184958,10 @@ async def get(self, reserved_ipv6: str, **kwargs: Any) -> JSON:
cls: ClsType[JSON] = kwargs.pop("cls", None)
- _request = build_reserved_ipv6_get_request(
- reserved_ipv6=reserved_ipv6,
+ _request = build_byoip_prefixes_list_resources_request(
+ byoip_prefix_uuid=byoip_prefix_uuid,
+ per_page=per_page,
+ page=page,
headers=_headers,
params=_params,
)
@@ -174324,27 +185020,96 @@ async def get(self, reserved_ipv6: str, **kwargs: Any) -> JSON:
return cast(JSON, deserialized) # type: ignore
+
+class SecurityOperations:
+ """
+ .. warning::
+ **DO NOT** instantiate this class directly.
+
+ Instead, you should access the following operations through
+ :class:`~pydo.aio.GeneratedClient`'s
+ :attr:`security` attribute.
+ """
+
+ def __init__(self, *args, **kwargs) -> None:
+ input_args = list(args)
+ self._client = input_args.pop(0) if input_args else kwargs.pop("client")
+ self._config = input_args.pop(0) if input_args else kwargs.pop("config")
+ self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
+ self._deserialize = (
+ input_args.pop(0) if input_args else kwargs.pop("deserializer")
+ )
+
@distributed_trace_async
- async def delete(self, reserved_ipv6: str, **kwargs: Any) -> Optional[JSON]:
+ async def list_scans(
+ self, *, per_page: int = 20, page: int = 1, **kwargs: Any
+ ) -> JSON:
# pylint: disable=line-too-long
- """Delete a Reserved IPv6.
+ """List Scans.
- To delete a reserved IP and remove it from your account, send a DELETE request
- to ``/v2/reserved_ipv6/$RESERVED_IPV6``.
-
- A successful request will receive a 204 status code with no body in response.
- This indicates that the request was processed successfully.
+ To list all CSPM scans, send a GET request to ``/v2/security/scans``.
- :param reserved_ipv6: A reserved IPv6 address. Required.
- :type reserved_ipv6: str
- :return: JSON object or None
- :rtype: JSON or None
+ :keyword per_page: Number of items returned per page. Default value is 20.
+ :paramtype per_page: int
+ :keyword page: Which 'page' of paginated results to return. Default value is 1.
+ :paramtype page: int
+ :return: JSON object
+ :rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
- # response body for status code(s): 404, 422
+ # response body for status code(s): 200
+ response == {
+ "meta": {
+ "total": 0 # Optional. Number of objects returned by the request.
+ },
+ "links": {
+ "pages": {}
+ },
+ "scans": [
+ {
+ "created_at": "2020-02-20 00:00:00", # Optional. When scan
+ was created.
+ "findings": [
+ {
+ "affected_resources_count": 0, # Optional.
+ The number of affected resources for the finding.
+ "business_impact": "str", # Optional. A
+ description of the business impact of the finding.
+ "details": "str", # Optional. A description
+ of the risk associated with the finding.
+ "found_at": "2020-02-20 00:00:00", #
+ Optional. When the finding was discovered.
+ "mitigation_steps": [
+ {
+ "description": "str", #
+ Optional. description.
+ "step": 0, # Optional. step.
+ "title": "str" # Optional.
+ title.
+ }
+ ],
+ "name": "str", # Optional. The name of the
+ rule that triggered the finding.
+ "rule_uuid": "str", # Optional. The unique
+ identifier for the rule that triggered the finding.
+ "severity": "str", # Optional. The severity
+ of the finding. Known values are: "CRITICAL", "HIGH", "MEDIUM",
+ and "LOW".
+ "technical_details": "str" # Optional. A
+ description of the technical details related to the finding.
+ }
+ ],
+ "id": "str", # Optional. The unique identifier for the scan.
+ "status": "str" # Optional. The status of the scan. Known
+ values are: "IN_PROGRESS", "COMPLETED", "FAILED", "CSPM_NOT_ENABLED", and
+ "SCAN_NOT_RUN".
+ }
+ ]
+ }
+ # response body for status code(s): 404
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
returned. For example, the ID for a response returning a 404 status code would
@@ -174372,10 +185137,11 @@ async def delete(self, reserved_ipv6: str, **kwargs: Any) -> Optional[JSON]:
_headers = kwargs.pop("headers", {}) or {}
_params = kwargs.pop("params", {}) or {}
- cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None)
+ cls: ClsType[JSON] = kwargs.pop("cls", None)
- _request = build_reserved_ipv6_delete_request(
- reserved_ipv6=reserved_ipv6,
+ _request = build_security_list_scans_request(
+ per_page=per_page,
+ page=page,
headers=_headers,
params=_params,
)
@@ -174390,26 +185156,14 @@ async def delete(self, reserved_ipv6: str, **kwargs: Any) -> Optional[JSON]:
response = pipeline_response.http_response
- if response.status_code not in [204, 404, 422]:
+ if response.status_code not in [200, 404]:
if _stream:
await response.read() # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
raise HttpResponseError(response=response)
- deserialized = None
response_headers = {}
- if response.status_code == 204:
- response_headers["ratelimit-limit"] = self._deserialize(
- "int", response.headers.get("ratelimit-limit")
- )
- response_headers["ratelimit-remaining"] = self._deserialize(
- "int", response.headers.get("ratelimit-remaining")
- )
- response_headers["ratelimit-reset"] = self._deserialize(
- "int", response.headers.get("ratelimit-reset")
- )
-
- if response.status_code == 404:
+ if response.status_code == 200:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
)
@@ -174425,7 +185179,7 @@ async def delete(self, reserved_ipv6: str, **kwargs: Any) -> Optional[JSON]:
else:
deserialized = None
- if response.status_code == 422:
+ if response.status_code == 404:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
)
@@ -174442,64 +185196,17 @@ async def delete(self, reserved_ipv6: str, **kwargs: Any) -> Optional[JSON]:
deserialized = None
if cls:
- return cls(pipeline_response, deserialized, response_headers) # type: ignore
-
- return deserialized # type: ignore
-
-
-class ReservedIPv6ActionsOperations:
- """
- .. warning::
- **DO NOT** instantiate this class directly.
-
- Instead, you should access the following operations through
- :class:`~pydo.aio.GeneratedClient`'s
- :attr:`reserved_ipv6_actions` attribute.
- """
+ return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
- def __init__(self, *args, **kwargs) -> None:
- input_args = list(args)
- self._client = input_args.pop(0) if input_args else kwargs.pop("client")
- self._config = input_args.pop(0) if input_args else kwargs.pop("config")
- self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
- self._deserialize = (
- input_args.pop(0) if input_args else kwargs.pop("deserializer")
- )
+ return cast(JSON, deserialized) # type: ignore
- @overload
- async def post(
- self,
- reserved_ipv6: str,
- body: Optional[JSON] = None,
- *,
- content_type: str = "application/json",
- **kwargs: Any
- ) -> JSON:
+ @distributed_trace_async
+ async def create_scan(self, **kwargs: Any) -> JSON:
# pylint: disable=line-too-long
- """Initiate a Reserved IPv6 Action.
-
- To initiate an action on a reserved IPv6 send a POST request to
- ``/v2/reserved_ipv6/$RESERVED_IPV6/actions``. In the JSON body to the request,
- set the ``type`` attribute to on of the supported action types:
-
- .. list-table::
- :header-rows: 1
+ """Create Scan.
- * - Action
- - Details
- * - ``assign``
- - Assigns a reserved IPv6 to a Droplet
- * - ``unassign``
- - Unassign a reserved IPv6 from a Droplet.
+ To create a CSPM scan, send a POST request to ``/v2/security/scans``.
- :param reserved_ipv6: A reserved IPv6 address. Required.
- :type reserved_ipv6: str
- :param body: The ``type`` attribute set in the request body will specify the action that
- will be taken on the reserved IPv6. Default value is None.
- :type body: JSON
- :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
- Default value is "application/json".
- :paramtype content_type: str
:return: JSON object
:rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
@@ -174507,53 +185214,46 @@ async def post(
Example:
.. code-block:: python
- # JSON input template you can fill out and use as your body input.
- body = {}
-
# response body for status code(s): 201
response == {
- "action": {
- "completed_at": "2020-02-20 00:00:00", # Optional. A time value
- given in ISO8601 combined date and time format that represents when the
- action was completed.
- "id": 0, # Optional. A unique numeric ID that can be used to
- identify and reference an action.
- "region": {
- "available": bool, # This is a boolean value that represents
- whether new Droplets can be created in this region. Required.
- "features": [
- "str" # This attribute is set to an array which
- contains features available in this region. Required.
- ],
- "name": "str", # The display name of the region. This will
- be a full name that is used in the control panel and other interfaces.
- Required.
- "sizes": [
- "str" # This attribute is set to an array which
- contains the identifying slugs for the sizes available in this
- region. sizes:read is required to view. Required.
- ],
- "slug": "str" # A human-readable string that is used as a
- unique identifier for each region. Required.
- },
- "region_slug": "str", # Optional. A human-readable string that is
- used as a unique identifier for each region.
- "resource_id": 0, # Optional. A unique identifier for the resource
- that the action is associated with.
- "resource_type": "str", # Optional. The type of resource that the
- action is associated with.
- "started_at": "2020-02-20 00:00:00", # Optional. A time value given
- in ISO8601 combined date and time format that represents when the action was
- initiated.
- "status": "in-progress", # Optional. Default value is "in-progress".
- The current status of the action. This can be "in-progress", "completed", or
- "errored". Known values are: "in-progress", "completed", and "errored".
- "type": "str" # Optional. This is the type of action that the object
- represents. For example, this could be "transfer" to represent the state of
- an image transfer action.
+ "scan": {
+ "created_at": "2020-02-20 00:00:00", # Optional. When scan was
+ created.
+ "findings": [
+ {
+ "affected_resources_count": 0, # Optional. The
+ number of affected resources for the finding.
+ "business_impact": "str", # Optional. A description
+ of the business impact of the finding.
+ "details": "str", # Optional. A description of the
+ risk associated with the finding.
+ "found_at": "2020-02-20 00:00:00", # Optional. When
+ the finding was discovered.
+ "mitigation_steps": [
+ {
+ "description": "str", # Optional.
+ description.
+ "step": 0, # Optional. step.
+ "title": "str" # Optional. title.
+ }
+ ],
+ "name": "str", # Optional. The name of the rule that
+ triggered the finding.
+ "rule_uuid": "str", # Optional. The unique
+ identifier for the rule that triggered the finding.
+ "severity": "str", # Optional. The severity of the
+ finding. Known values are: "CRITICAL", "HIGH", "MEDIUM", and "LOW".
+ "technical_details": "str" # Optional. A description
+ of the technical details related to the finding.
+ }
+ ],
+ "id": "str", # Optional. The unique identifier for the scan.
+ "status": "str" # Optional. The status of the scan. Known values
+ are: "IN_PROGRESS", "COMPLETED", "FAILED", "CSPM_NOT_ENABLED", and
+ "SCAN_NOT_RUN".
}
}
- # response body for status code(s): 404
+ # response body for status code(s): 400, 404
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
returned. For example, the ID for a response returning a 404 status code would
@@ -174565,41 +185265,126 @@ async def post(
tickets to help identify the issue.
}
"""
+ error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ 401: cast(
+ Type[HttpResponseError],
+ lambda response: ClientAuthenticationError(response=response),
+ ),
+ 429: HttpResponseError,
+ 500: HttpResponseError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = kwargs.pop("params", {}) or {}
+
+ cls: ClsType[JSON] = kwargs.pop("cls", None)
+
+ _request = build_security_create_scan_request(
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = (
+ await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [201, 400, 404]:
+ if _stream:
+ await response.read() # Load the body in memory and close the socket
+ map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
+ raise HttpResponseError(response=response)
+
+ response_headers = {}
+ if response.status_code == 201:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
- @overload
- async def post(
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if response.status_code == 400:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if response.status_code == 404:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if cls:
+ return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
+
+ return cast(JSON, deserialized) # type: ignore
+
+ @distributed_trace_async
+ async def get_scan(
self,
- reserved_ipv6: str,
- body: Optional[IO[bytes]] = None,
+ scan_id: str,
*,
- content_type: str = "application/json",
+ severity: Optional[str] = None,
+ per_page: int = 20,
+ page: int = 1,
+ type: Optional[str] = None,
**kwargs: Any
) -> JSON:
# pylint: disable=line-too-long
- """Initiate a Reserved IPv6 Action.
-
- To initiate an action on a reserved IPv6 send a POST request to
- ``/v2/reserved_ipv6/$RESERVED_IPV6/actions``. In the JSON body to the request,
- set the ``type`` attribute to on of the supported action types:
-
- .. list-table::
- :header-rows: 1
+ """Get Scan.
- * - Action
- - Details
- * - ``assign``
- - Assigns a reserved IPv6 to a Droplet
- * - ``unassign``
- - Unassign a reserved IPv6 from a Droplet.
+ To get a CSPM scan by ID, send a GET request to ``/v2/security/scans/{scan_id}``.
- :param reserved_ipv6: A reserved IPv6 address. Required.
- :type reserved_ipv6: str
- :param body: The ``type`` attribute set in the request body will specify the action that
- will be taken on the reserved IPv6. Default value is None.
- :type body: IO[bytes]
- :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
- Default value is "application/json".
- :paramtype content_type: str
+ :param scan_id: The scan UUID. Required.
+ :type scan_id: str
+ :keyword severity: The finding severity level to include. Known values are: "LOW", "MEDIUM",
+ "HIGH", and "CRITICAL". Default value is None.
+ :paramtype severity: str
+ :keyword per_page: Number of items returned per page. Default value is 20.
+ :paramtype per_page: int
+ :keyword page: Which 'page' of paginated results to return. Default value is 1.
+ :paramtype page: int
+ :keyword type: The finding type to include. Default value is None.
+ :paramtype type: str
:return: JSON object
:rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
@@ -174607,47 +185392,43 @@ async def post(
Example:
.. code-block:: python
- # response body for status code(s): 201
+ # response body for status code(s): 200
response == {
- "action": {
- "completed_at": "2020-02-20 00:00:00", # Optional. A time value
- given in ISO8601 combined date and time format that represents when the
- action was completed.
- "id": 0, # Optional. A unique numeric ID that can be used to
- identify and reference an action.
- "region": {
- "available": bool, # This is a boolean value that represents
- whether new Droplets can be created in this region. Required.
- "features": [
- "str" # This attribute is set to an array which
- contains features available in this region. Required.
- ],
- "name": "str", # The display name of the region. This will
- be a full name that is used in the control panel and other interfaces.
- Required.
- "sizes": [
- "str" # This attribute is set to an array which
- contains the identifying slugs for the sizes available in this
- region. sizes:read is required to view. Required.
- ],
- "slug": "str" # A human-readable string that is used as a
- unique identifier for each region. Required.
- },
- "region_slug": "str", # Optional. A human-readable string that is
- used as a unique identifier for each region.
- "resource_id": 0, # Optional. A unique identifier for the resource
- that the action is associated with.
- "resource_type": "str", # Optional. The type of resource that the
- action is associated with.
- "started_at": "2020-02-20 00:00:00", # Optional. A time value given
- in ISO8601 combined date and time format that represents when the action was
- initiated.
- "status": "in-progress", # Optional. Default value is "in-progress".
- The current status of the action. This can be "in-progress", "completed", or
- "errored". Known values are: "in-progress", "completed", and "errored".
- "type": "str" # Optional. This is the type of action that the object
- represents. For example, this could be "transfer" to represent the state of
- an image transfer action.
+ "scan": {
+ "created_at": "2020-02-20 00:00:00", # Optional. When scan was
+ created.
+ "findings": [
+ {
+ "affected_resources_count": 0, # Optional. The
+ number of affected resources for the finding.
+ "business_impact": "str", # Optional. A description
+ of the business impact of the finding.
+ "details": "str", # Optional. A description of the
+ risk associated with the finding.
+ "found_at": "2020-02-20 00:00:00", # Optional. When
+ the finding was discovered.
+ "mitigation_steps": [
+ {
+ "description": "str", # Optional.
+ description.
+ "step": 0, # Optional. step.
+ "title": "str" # Optional. title.
+ }
+ ],
+ "name": "str", # Optional. The name of the rule that
+ triggered the finding.
+ "rule_uuid": "str", # Optional. The unique
+ identifier for the rule that triggered the finding.
+ "severity": "str", # Optional. The severity of the
+ finding. Known values are: "CRITICAL", "HIGH", "MEDIUM", and "LOW".
+ "technical_details": "str" # Optional. A description
+ of the technical details related to the finding.
+ }
+ ],
+ "id": "str", # Optional. The unique identifier for the scan.
+ "status": "str" # Optional. The status of the scan. Known values
+ are: "IN_PROGRESS", "COMPLETED", "FAILED", "CSPM_NOT_ENABLED", and
+ "SCAN_NOT_RUN".
}
}
# response body for status code(s): 404
@@ -174662,37 +185443,112 @@ async def post(
tickets to help identify the issue.
}
"""
+ error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ 401: cast(
+ Type[HttpResponseError],
+ lambda response: ClientAuthenticationError(response=response),
+ ),
+ 429: HttpResponseError,
+ 500: HttpResponseError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = kwargs.pop("params", {}) or {}
+
+ cls: ClsType[JSON] = kwargs.pop("cls", None)
+
+ _request = build_security_get_scan_request(
+ scan_id=scan_id,
+ severity=severity,
+ per_page=per_page,
+ page=page,
+ type=type,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = (
+ await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 404]:
+ if _stream:
+ await response.read() # Load the body in memory and close the socket
+ map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
+ raise HttpResponseError(response=response)
+
+ response_headers = {}
+ if response.status_code == 200:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if response.status_code == 404:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if cls:
+ return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
+
+ return cast(JSON, deserialized) # type: ignore
@distributed_trace_async
- async def post(
+ async def get_latest_scan(
self,
- reserved_ipv6: str,
- body: Optional[Union[JSON, IO[bytes]]] = None,
+ *,
+ per_page: int = 20,
+ page: int = 1,
+ severity: Optional[str] = None,
+ type: Optional[str] = None,
**kwargs: Any
) -> JSON:
# pylint: disable=line-too-long
- """Initiate a Reserved IPv6 Action.
-
- To initiate an action on a reserved IPv6 send a POST request to
- ``/v2/reserved_ipv6/$RESERVED_IPV6/actions``. In the JSON body to the request,
- set the ``type`` attribute to on of the supported action types:
-
- .. list-table::
- :header-rows: 1
+ """Get Latest Scan.
- * - Action
- - Details
- * - ``assign``
- - Assigns a reserved IPv6 to a Droplet
- * - ``unassign``
- - Unassign a reserved IPv6 from a Droplet.
+ To get the latest CSPM scan, send a GET request to ``/v2/security/scans/latest``.
- :param reserved_ipv6: A reserved IPv6 address. Required.
- :type reserved_ipv6: str
- :param body: The ``type`` attribute set in the request body will specify the action that
- will be taken on the reserved IPv6. Is either a JSON type or a IO[bytes] type. Default value
- is None.
- :type body: JSON or IO[bytes]
+ :keyword per_page: Number of items returned per page. Default value is 20.
+ :paramtype per_page: int
+ :keyword page: Which 'page' of paginated results to return. Default value is 1.
+ :paramtype page: int
+ :keyword severity: The finding severity level to include. Known values are: "LOW", "MEDIUM",
+ "HIGH", and "CRITICAL". Default value is None.
+ :paramtype severity: str
+ :keyword type: The finding type to include. Default value is None.
+ :paramtype type: str
:return: JSON object
:rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
@@ -174700,50 +185556,43 @@ async def post(
Example:
.. code-block:: python
- # JSON input template you can fill out and use as your body input.
- body = {}
-
- # response body for status code(s): 201
+ # response body for status code(s): 200
response == {
- "action": {
- "completed_at": "2020-02-20 00:00:00", # Optional. A time value
- given in ISO8601 combined date and time format that represents when the
- action was completed.
- "id": 0, # Optional. A unique numeric ID that can be used to
- identify and reference an action.
- "region": {
- "available": bool, # This is a boolean value that represents
- whether new Droplets can be created in this region. Required.
- "features": [
- "str" # This attribute is set to an array which
- contains features available in this region. Required.
- ],
- "name": "str", # The display name of the region. This will
- be a full name that is used in the control panel and other interfaces.
- Required.
- "sizes": [
- "str" # This attribute is set to an array which
- contains the identifying slugs for the sizes available in this
- region. sizes:read is required to view. Required.
- ],
- "slug": "str" # A human-readable string that is used as a
- unique identifier for each region. Required.
- },
- "region_slug": "str", # Optional. A human-readable string that is
- used as a unique identifier for each region.
- "resource_id": 0, # Optional. A unique identifier for the resource
- that the action is associated with.
- "resource_type": "str", # Optional. The type of resource that the
- action is associated with.
- "started_at": "2020-02-20 00:00:00", # Optional. A time value given
- in ISO8601 combined date and time format that represents when the action was
- initiated.
- "status": "in-progress", # Optional. Default value is "in-progress".
- The current status of the action. This can be "in-progress", "completed", or
- "errored". Known values are: "in-progress", "completed", and "errored".
- "type": "str" # Optional. This is the type of action that the object
- represents. For example, this could be "transfer" to represent the state of
- an image transfer action.
+ "scan": {
+ "created_at": "2020-02-20 00:00:00", # Optional. When scan was
+ created.
+ "findings": [
+ {
+ "affected_resources_count": 0, # Optional. The
+ number of affected resources for the finding.
+ "business_impact": "str", # Optional. A description
+ of the business impact of the finding.
+ "details": "str", # Optional. A description of the
+ risk associated with the finding.
+ "found_at": "2020-02-20 00:00:00", # Optional. When
+ the finding was discovered.
+ "mitigation_steps": [
+ {
+ "description": "str", # Optional.
+ description.
+ "step": 0, # Optional. step.
+ "title": "str" # Optional. title.
+ }
+ ],
+ "name": "str", # Optional. The name of the rule that
+ triggered the finding.
+ "rule_uuid": "str", # Optional. The unique
+ identifier for the rule that triggered the finding.
+ "severity": "str", # Optional. The severity of the
+ finding. Known values are: "CRITICAL", "HIGH", "MEDIUM", and "LOW".
+ "technical_details": "str" # Optional. A description
+ of the technical details related to the finding.
+ }
+ ],
+ "id": "str", # Optional. The unique identifier for the scan.
+ "status": "str" # Optional. The status of the scan. Known values
+ are: "IN_PROGRESS", "COMPLETED", "FAILED", "CSPM_NOT_ENABLED", and
+ "SCAN_NOT_RUN".
}
}
# response body for status code(s): 404
@@ -174771,30 +185620,16 @@ async def post(
}
error_map.update(kwargs.pop("error_map", {}) or {})
- _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _headers = kwargs.pop("headers", {}) or {}
_params = kwargs.pop("params", {}) or {}
- content_type: Optional[str] = kwargs.pop(
- "content_type", _headers.pop("Content-Type", None)
- )
cls: ClsType[JSON] = kwargs.pop("cls", None)
- content_type = content_type or "application/json"
- _json = None
- _content = None
- if isinstance(body, (IOBase, bytes)):
- _content = body
- else:
- if body is not None:
- _json = body
- else:
- _json = None
-
- _request = build_reserved_ipv6_actions_post_request(
- reserved_ipv6=reserved_ipv6,
- content_type=content_type,
- json=_json,
- content=_content,
+ _request = build_security_get_latest_scan_request(
+ per_page=per_page,
+ page=page,
+ severity=severity,
+ type=type,
headers=_headers,
params=_params,
)
@@ -174809,14 +185644,14 @@ async def post(
response = pipeline_response.http_response
- if response.status_code not in [201, 404]:
+ if response.status_code not in [200, 404]:
if _stream:
await response.read() # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
raise HttpResponseError(response=response)
response_headers = {}
- if response.status_code == 201:
+ if response.status_code == 200:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
)
@@ -174853,45 +185688,23 @@ async def post(
return cast(JSON, deserialized) # type: ignore
-
-class ByoipPrefixesOperations:
- """
- .. warning::
- **DO NOT** instantiate this class directly.
-
- Instead, you should access the following operations through
- :class:`~pydo.aio.GeneratedClient`'s
- :attr:`byoip_prefixes` attribute.
- """
-
- def __init__(self, *args, **kwargs) -> None:
- input_args = list(args)
- self._client = input_args.pop(0) if input_args else kwargs.pop("client")
- self._config = input_args.pop(0) if input_args else kwargs.pop("config")
- self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
- self._deserialize = (
- input_args.pop(0) if input_args else kwargs.pop("deserializer")
- )
-
@overload
- async def create(
+ async def create_scan_rule(
self, body: JSON, *, content_type: str = "application/json", **kwargs: Any
- ) -> JSON:
+ ) -> Optional[JSON]:
# pylint: disable=line-too-long
- """Create a BYOIP Prefix.
-
- To create a BYOIP prefix, send a POST request to ``/v2/byoip_prefixes``.
+ """Create Scan Rule.
- A successful request will initiate the process of bringing your BYOIP Prefix into your account.
- The response will include the details of the created prefix, including its UUID and status.
+ To mark a scan finding as a false positive, send a POST request to
+ ``/v2/security/scans/rules`` to create a new scan rule.
:param body: Required.
:type body: JSON
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :return: JSON object
- :rtype: JSON
+ :return: JSON object or None
+ :rtype: JSON or None
:raises ~azure.core.exceptions.HttpResponseError:
Example:
@@ -174899,19 +185712,11 @@ async def create(
# JSON input template you can fill out and use as your body input.
body = {
- "prefix": "str", # The IP prefix in CIDR notation to bring. Required.
- "region": "str", # The region where the prefix will be created. Required.
- "signature": "str" # The signature hash for the prefix creation request.
- Required.
+ "resource": "str" # Optional. The URN of a resource to exclude from future
+ scans.
}
- # response body for status code(s): 202
- response == {
- "region": "str", # Optional. The region where the prefix is created.
- "status": "str", # Optional. The status of the BYOIP prefix.
- "uuid": "str" # Optional. The unique identifier for the BYOIP prefix.
- }
- # response body for status code(s): 422
+ # response body for status code(s): 400, 404
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
returned. For example, the ID for a response returning a 404 status code would
@@ -174925,36 +185730,28 @@ async def create(
"""
@overload
- async def create(
+ async def create_scan_rule(
self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
- ) -> JSON:
+ ) -> Optional[JSON]:
# pylint: disable=line-too-long
- """Create a BYOIP Prefix.
-
- To create a BYOIP prefix, send a POST request to ``/v2/byoip_prefixes``.
+ """Create Scan Rule.
- A successful request will initiate the process of bringing your BYOIP Prefix into your account.
- The response will include the details of the created prefix, including its UUID and status.
+ To mark a scan finding as a false positive, send a POST request to
+ ``/v2/security/scans/rules`` to create a new scan rule.
:param body: Required.
:type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :return: JSON object
- :rtype: JSON
+ :return: JSON object or None
+ :rtype: JSON or None
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
- # response body for status code(s): 202
- response == {
- "region": "str", # Optional. The region where the prefix is created.
- "status": "str", # Optional. The status of the BYOIP prefix.
- "uuid": "str" # Optional. The unique identifier for the BYOIP prefix.
- }
- # response body for status code(s): 422
+ # response body for status code(s): 400, 404
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
returned. For example, the ID for a response returning a 404 status code would
@@ -174968,19 +185765,19 @@ async def create(
"""
@distributed_trace_async
- async def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON:
+ async def create_scan_rule(
+ self, body: Union[JSON, IO[bytes]], **kwargs: Any
+ ) -> Optional[JSON]:
# pylint: disable=line-too-long
- """Create a BYOIP Prefix.
-
- To create a BYOIP prefix, send a POST request to ``/v2/byoip_prefixes``.
+ """Create Scan Rule.
- A successful request will initiate the process of bringing your BYOIP Prefix into your account.
- The response will include the details of the created prefix, including its UUID and status.
+ To mark a scan finding as a false positive, send a POST request to
+ ``/v2/security/scans/rules`` to create a new scan rule.
:param body: Is either a JSON type or a IO[bytes] type. Required.
:type body: JSON or IO[bytes]
- :return: JSON object
- :rtype: JSON
+ :return: JSON object or None
+ :rtype: JSON or None
:raises ~azure.core.exceptions.HttpResponseError:
Example:
@@ -174988,19 +185785,11 @@ async def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON:
# JSON input template you can fill out and use as your body input.
body = {
- "prefix": "str", # The IP prefix in CIDR notation to bring. Required.
- "region": "str", # The region where the prefix will be created. Required.
- "signature": "str" # The signature hash for the prefix creation request.
- Required.
+ "resource": "str" # Optional. The URN of a resource to exclude from future
+ scans.
}
- # response body for status code(s): 202
- response == {
- "region": "str", # Optional. The region where the prefix is created.
- "status": "str", # Optional. The status of the BYOIP prefix.
- "uuid": "str" # Optional. The unique identifier for the BYOIP prefix.
- }
- # response body for status code(s): 422
+ # response body for status code(s): 400, 404
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
returned. For example, the ID for a response returning a 404 status code would
@@ -175031,7 +185820,7 @@ async def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON:
content_type: Optional[str] = kwargs.pop(
"content_type", _headers.pop("Content-Type", None)
)
- cls: ClsType[JSON] = kwargs.pop("cls", None)
+ cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -175041,7 +185830,7 @@ async def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON:
else:
_json = body
- _request = build_byoip_prefixes_create_request(
+ _request = build_security_create_scan_rule_request(
content_type=content_type,
json=_json,
content=_content,
@@ -175059,14 +185848,26 @@ async def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON:
response = pipeline_response.http_response
- if response.status_code not in [202, 422]:
+ if response.status_code not in [201, 400, 404]:
if _stream:
await response.read() # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
raise HttpResponseError(response=response)
+ deserialized = None
response_headers = {}
- if response.status_code == 202:
+ if response.status_code == 201:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.status_code == 400:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
)
@@ -175082,7 +185883,7 @@ async def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON:
else:
deserialized = None
- if response.status_code == 422:
+ if response.status_code == 404:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
)
@@ -175099,17 +185900,30 @@ async def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON:
deserialized = None
if cls:
- return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- return cast(JSON, deserialized) # type: ignore
+ return deserialized # type: ignore
@distributed_trace_async
- async def list(self, *, per_page: int = 20, page: int = 1, **kwargs: Any) -> JSON:
- """List BYOIP Prefixes.
+ async def list_scan_finding_affected_resources(
+ self,
+ scan_id: str,
+ finding_uuid: str,
+ *,
+ per_page: int = 20,
+ page: int = 1,
+ **kwargs: Any
+ ) -> JSON:
+ # pylint: disable=line-too-long
+ """List Finding Affected Resources.
- To list all BYOIP prefixes, send a GET request to ``/v2/byoip_prefixes``.
- A successful response will return a list of all BYOIP prefixes associated with the account.
+ To get affected resources for a scan finding, send a GET request to
+ ``/v2/security/scans/{scan_id}/findings/{finding_uuid}/affected_resources``.
+ :param scan_id: The scan UUID. Required.
+ :type scan_id: str
+ :param finding_uuid: The finding UUID. Required.
+ :type finding_uuid: str
:keyword per_page: Number of items returned per page. Default value is 20.
:paramtype per_page: int
:keyword page: Which 'page' of paginated results to return. Default value is 1.
@@ -175123,41 +185937,26 @@ async def list(self, *, per_page: int = 20, page: int = 1, **kwargs: Any) -> JSO
# response body for status code(s): 200
response == {
- "meta": {
- "total": 0 # Optional. Number of objects returned by the request.
- },
- "byoip_prefixes": [
+ "affected_resources": [
{
- "advertised": bool, # Optional. Whether the BYOIP prefix is
- being advertised.
- "failure_reason": "str", # Optional. Reason for failure, if
- applicable.
- "locked": bool, # Optional. Whether the BYOIP prefix is
- locked.
- "name": "str", # Optional. Name of the BYOIP prefix.
- "prefix": "str", # Optional. The IP prefix in CIDR notation.
- "project_id": "str", # Optional. The ID of the project
- associated with the BYOIP prefix.
- "region": "str", # Optional. Region where the BYOIP prefix
- is located.
- "status": "str", # Optional. Status of the BYOIP prefix.
- "uuid": "str", # Optional. Unique identifier for the BYOIP
- prefix.
- "validations": [
- {
- "name": "str", # Optional. Name of the
- validation.
- "note": "str", # Optional. Additional notes
- or details about the validation.
- "status": "str" # Optional. Status of the
- validation.
- }
- ]
+ "name": "str", # Optional. The name of the affected
+ resource.
+ "type": "str", # Optional. The type of the affected
+ resource.
+ "urn": "str" # Optional. The URN for the affected resource.
}
- ],
- "links": {
- "pages": {}
- }
+ ]
+ }
+ # response body for status code(s): 404
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
}
"""
error_map: MutableMapping[int, Type[HttpResponseError]] = {
@@ -175178,7 +185977,9 @@ async def list(self, *, per_page: int = 20, page: int = 1, **kwargs: Any) -> JSO
cls: ClsType[JSON] = kwargs.pop("cls", None)
- _request = build_byoip_prefixes_list_request(
+ _request = build_security_list_scan_finding_affected_resources_request(
+ scan_id=scan_id,
+ finding_uuid=finding_uuid,
per_page=per_page,
page=page,
headers=_headers,
@@ -175195,27 +185996,44 @@ async def list(self, *, per_page: int = 20, page: int = 1, **kwargs: Any) -> JSO
response = pipeline_response.http_response
- if response.status_code not in [200]:
+ if response.status_code not in [200, 404]:
if _stream:
await response.read() # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
raise HttpResponseError(response=response)
response_headers = {}
- response_headers["ratelimit-limit"] = self._deserialize(
- "int", response.headers.get("ratelimit-limit")
- )
- response_headers["ratelimit-remaining"] = self._deserialize(
- "int", response.headers.get("ratelimit-remaining")
- )
- response_headers["ratelimit-reset"] = self._deserialize(
- "int", response.headers.get("ratelimit-reset")
- )
+ if response.status_code == 200:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
- if response.content:
- deserialized = response.json()
- else:
- deserialized = None
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if response.status_code == 404:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
if cls:
return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
@@ -175223,16 +186041,18 @@ async def list(self, *, per_page: int = 20, page: int = 1, **kwargs: Any) -> JSO
return cast(JSON, deserialized) # type: ignore
@distributed_trace_async
- async def get(self, byoip_prefix_uuid: str, **kwargs: Any) -> JSON:
+ async def list_settings(
+ self, *, per_page: int = 20, page: int = 1, **kwargs: Any
+ ) -> JSON:
# pylint: disable=line-too-long
- """Get a BYOIP Prefix.
-
- To get a BYOIP prefix, send a GET request to ``/v2/byoip_prefixes/$byoip_prefix_uuid``.
+ """List Settings.
- A successful response will return the details of the specified BYOIP prefix.
+ To list CSPM scan settings, send a GET request to ``/v2/security/settings``.
- :param byoip_prefix_uuid: The unique identifier for the BYOIP Prefix. Required.
- :type byoip_prefix_uuid: str
+ :keyword per_page: Number of items returned per page. Default value is 20.
+ :paramtype per_page: int
+ :keyword page: Which 'page' of paginated results to return. Default value is 1.
+ :paramtype page: int
:return: JSON object
:rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
@@ -175242,32 +186062,61 @@ async def get(self, byoip_prefix_uuid: str, **kwargs: Any) -> JSON:
# response body for status code(s): 200
response == {
- "byoip_prefix": {
- "advertised": bool, # Optional. Whether the BYOIP prefix is being
- advertised.
- "failure_reason": "str", # Optional. Reason for failure, if
- applicable.
- "locked": bool, # Optional. Whether the BYOIP prefix is locked.
- "name": "str", # Optional. Name of the BYOIP prefix.
- "prefix": "str", # Optional. The IP prefix in CIDR notation.
- "project_id": "str", # Optional. The ID of the project associated
- with the BYOIP prefix.
- "region": "str", # Optional. Region where the BYOIP prefix is
- located.
- "status": "str", # Optional. Status of the BYOIP prefix.
- "uuid": "str", # Optional. Unique identifier for the BYOIP prefix.
- "validations": [
- {
- "name": "str", # Optional. Name of the validation.
- "note": "str", # Optional. Additional notes or
- details about the validation.
- "status": "str" # Optional. Status of the
- validation.
- }
- ]
+ "plan_downgrades": {
+ "str": {
+ "effective_at": "2020-02-20 00:00:00", # Optional. When the
+ coverage downgrade takes effect.
+ "resources": [
+ "str" # Optional. URNs of resources that will be
+ downgraded.
+ ]
+ }
+ },
+ "settings": {
+ "suppressions": {
+ "links": {
+ "pages": {
+ "first": "str", # Optional.
+ "last": "str", # Optional.
+ "next": "str", # Optional.
+ "prev": "str" # Optional.
+ }
+ },
+ "meta": {
+ "page": 0, # Optional.
+ "pages": 0, # Optional.
+ "total": 0 # Optional.
+ },
+ "resources": [
+ {
+ "id": "str", # Optional. Unique identifier
+ for the suppressed resource.
+ "resource_id": "str", # Optional. Unique
+ identifier for the resource suppressed.
+ "resource_type": "str", # Optional. Resource
+ type for the resource suppressed.
+ "rule_name": "str", # Optional.
+ Human-readable rule name for the suppressed rule.
+ "rule_uuid": "str" # Optional. Unique
+ identifier for the suppressed rule.
+ }
+ ]
+ }
+ },
+ "tier_coverage": {
+ "str": {
+ "resources": [
+ "str" # Optional. Dictionary of
+ .
+ ],
+ "tags": [
+ "str" # Optional. Dictionary of
+ .
+ ]
+ }
}
}
- # response body for status code(s): 404, 422
+ # response body for status code(s): 404
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
returned. For example, the ID for a response returning a 404 status code would
@@ -175297,8 +186146,9 @@ async def get(self, byoip_prefix_uuid: str, **kwargs: Any) -> JSON:
cls: ClsType[JSON] = kwargs.pop("cls", None)
- _request = build_byoip_prefixes_get_request(
- byoip_prefix_uuid=byoip_prefix_uuid,
+ _request = build_security_list_settings_request(
+ per_page=per_page,
+ page=page,
headers=_headers,
params=_params,
)
@@ -175313,7 +186163,7 @@ async def get(self, byoip_prefix_uuid: str, **kwargs: Any) -> JSON:
response = pipeline_response.http_response
- if response.status_code not in [200, 404, 422]:
+ if response.status_code not in [200, 404]:
if _stream:
await response.read() # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
@@ -175352,48 +186202,175 @@ async def get(self, byoip_prefix_uuid: str, **kwargs: Any) -> JSON:
else:
deserialized = None
- if response.status_code == 422:
- response_headers["ratelimit-limit"] = self._deserialize(
- "int", response.headers.get("ratelimit-limit")
- )
- response_headers["ratelimit-remaining"] = self._deserialize(
- "int", response.headers.get("ratelimit-remaining")
- )
- response_headers["ratelimit-reset"] = self._deserialize(
- "int", response.headers.get("ratelimit-reset")
- )
-
- if response.content:
- deserialized = response.json()
- else:
- deserialized = None
-
if cls:
return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
return cast(JSON, deserialized) # type: ignore
- @distributed_trace_async
- async def delete(self, byoip_prefix_uuid: str, **kwargs: Any) -> Optional[JSON]:
+ @overload
+ async def update_settings_plan(
+ self, body: JSON, *, content_type: str = "application/json", **kwargs: Any
+ ) -> JSON:
# pylint: disable=line-too-long
- """Delete a BYOIP Prefix.
+ """Update Plan.
- To delete a BYOIP prefix and remove it from your account, send a DELETE request
- to ``/v2/byoip_prefixes/$byoip_prefix_uuid``.
+ To update CSPM plan coverage, send a PUT request to ``/v2/security/settings/plan``.
- A successful request will receive a 202 status code with no body in response.
- This indicates that the request was accepted and the prefix is being deleted.
+ :param body: Required.
+ :type body: JSON
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: JSON object
+ :rtype: JSON
+ :raises ~azure.core.exceptions.HttpResponseError:
- :param byoip_prefix_uuid: The unique identifier for the BYOIP Prefix. Required.
- :type byoip_prefix_uuid: str
- :return: JSON object or None
- :rtype: JSON or None
+ Example:
+ .. code-block:: python
+
+ # JSON input template you can fill out and use as your body input.
+ body = {
+ "tier_coverage": {
+ "str": {
+ "resources": [
+ "str" # Optional. The URNs of resources to scan for
+ the tier.
+ ],
+ "tags": [
+ "str" # Optional. Resource tags to scan for the
+ tier.
+ ]
+ }
+ }
+ }
+
+ # response body for status code(s): 200
+ response == {
+ "tier_coverage": {
+ "str": {
+ "resources": [
+ "str" # Optional. Dictionary of
+ .
+ ],
+ "tags": [
+ "str" # Optional. Dictionary of
+ .
+ ]
+ }
+ }
+ }
+ # response body for status code(s): 400, 404
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
+ """
+
+ @overload
+ async def update_settings_plan(
+ self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
+ ) -> JSON:
+ # pylint: disable=line-too-long
+ """Update Plan.
+
+ To update CSPM plan coverage, send a PUT request to ``/v2/security/settings/plan``.
+
+ :param body: Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: JSON object
+ :rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
- # response body for status code(s): 404, 422
+ # response body for status code(s): 200
+ response == {
+ "tier_coverage": {
+ "str": {
+ "resources": [
+ "str" # Optional. Dictionary of
+ .
+ ],
+ "tags": [
+ "str" # Optional. Dictionary of
+ .
+ ]
+ }
+ }
+ }
+ # response body for status code(s): 400, 404
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
+ """
+
+ @distributed_trace_async
+ async def update_settings_plan(
+ self, body: Union[JSON, IO[bytes]], **kwargs: Any
+ ) -> JSON:
+ # pylint: disable=line-too-long
+ """Update Plan.
+
+ To update CSPM plan coverage, send a PUT request to ``/v2/security/settings/plan``.
+
+ :param body: Is either a JSON type or a IO[bytes] type. Required.
+ :type body: JSON or IO[bytes]
+ :return: JSON object
+ :rtype: JSON
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # JSON input template you can fill out and use as your body input.
+ body = {
+ "tier_coverage": {
+ "str": {
+ "resources": [
+ "str" # Optional. The URNs of resources to scan for
+ the tier.
+ ],
+ "tags": [
+ "str" # Optional. Resource tags to scan for the
+ tier.
+ ]
+ }
+ }
+ }
+
+ # response body for status code(s): 200
+ response == {
+ "tier_coverage": {
+ "str": {
+ "resources": [
+ "str" # Optional. Dictionary of
+ .
+ ],
+ "tags": [
+ "str" # Optional. Dictionary of
+ .
+ ]
+ }
+ }
+ }
+ # response body for status code(s): 400, 404
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
returned. For example, the ID for a response returning a 404 status code would
@@ -175418,13 +186395,26 @@ async def delete(self, byoip_prefix_uuid: str, **kwargs: Any) -> Optional[JSON]:
}
error_map.update(kwargs.pop("error_map", {}) or {})
- _headers = kwargs.pop("headers", {}) or {}
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = kwargs.pop("params", {}) or {}
- cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None)
+ content_type: Optional[str] = kwargs.pop(
+ "content_type", _headers.pop("Content-Type", None)
+ )
+ cls: ClsType[JSON] = kwargs.pop("cls", None)
- _request = build_byoip_prefixes_delete_request(
- byoip_prefix_uuid=byoip_prefix_uuid,
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = body
+
+ _request = build_security_update_settings_plan_request(
+ content_type=content_type,
+ json=_json,
+ content=_content,
headers=_headers,
params=_params,
)
@@ -175439,15 +186429,14 @@ async def delete(self, byoip_prefix_uuid: str, **kwargs: Any) -> Optional[JSON]:
response = pipeline_response.http_response
- if response.status_code not in [202, 404, 422]:
+ if response.status_code not in [200, 400, 404]:
if _stream:
await response.read() # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
raise HttpResponseError(response=response)
- deserialized = None
response_headers = {}
- if response.status_code == 202:
+ if response.status_code == 200:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
)
@@ -175458,7 +186447,12 @@ async def delete(self, byoip_prefix_uuid: str, **kwargs: Any) -> Optional[JSON]:
"int", response.headers.get("ratelimit-reset")
)
- if response.status_code == 404:
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if response.status_code == 400:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
)
@@ -175474,7 +186468,7 @@ async def delete(self, byoip_prefix_uuid: str, **kwargs: Any) -> Optional[JSON]:
else:
deserialized = None
- if response.status_code == 422:
+ if response.status_code == 404:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
)
@@ -175491,29 +186485,19 @@ async def delete(self, byoip_prefix_uuid: str, **kwargs: Any) -> Optional[JSON]:
deserialized = None
if cls:
- return cls(pipeline_response, deserialized, response_headers) # type: ignore
+ return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
- return deserialized # type: ignore
+ return cast(JSON, deserialized) # type: ignore
@overload
- async def patch(
- self,
- byoip_prefix_uuid: str,
- body: JSON,
- *,
- content_type: str = "application/json",
- **kwargs: Any
+ async def create_suppression(
+ self, body: JSON, *, content_type: str = "application/json", **kwargs: Any
) -> JSON:
# pylint: disable=line-too-long
- """Update a BYOIP Prefix.
-
- To update a BYOIP prefix, send a PATCH request to ``/v2/byoip_prefixes/$byoip_prefix_uuid``.
+ """Create Suppression.
- Currently, you can update the advertisement status of the prefix.
- The response will include the updated details of the prefix.
+ To suppress scan findings, send a POST request to ``/v2/security/settings/suppressions``.
- :param byoip_prefix_uuid: A unique identifier for a BYOIP prefix. Required.
- :type byoip_prefix_uuid: str
:param body: Required.
:type body: JSON
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
@@ -175528,37 +186512,44 @@ async def patch(
# JSON input template you can fill out and use as your body input.
body = {
- "advertise": bool # Optional. Whether the BYOIP prefix should be advertised.
+ "resources": [
+ "str" # Optional. The URNs of resources to suppress for the rule.
+ ],
+ "rule_uuid": "str" # Optional. The rule UUID to suppress for the listed
+ resources.
}
- # response body for status code(s): 202
+ # response body for status code(s): 201
response == {
- "byoip_prefix": {
- "advertised": bool, # Optional. Whether the BYOIP prefix is being
- advertised.
- "failure_reason": "str", # Optional. Reason for failure, if
- applicable.
- "locked": bool, # Optional. Whether the BYOIP prefix is locked.
- "name": "str", # Optional. Name of the BYOIP prefix.
- "prefix": "str", # Optional. The IP prefix in CIDR notation.
- "project_id": "str", # Optional. The ID of the project associated
- with the BYOIP prefix.
- "region": "str", # Optional. Region where the BYOIP prefix is
- located.
- "status": "str", # Optional. Status of the BYOIP prefix.
- "uuid": "str", # Optional. Unique identifier for the BYOIP prefix.
- "validations": [
- {
- "name": "str", # Optional. Name of the validation.
- "note": "str", # Optional. Additional notes or
- details about the validation.
- "status": "str" # Optional. Status of the
- validation.
- }
- ]
- }
+ "links": {
+ "pages": {
+ "first": "str", # Optional.
+ "last": "str", # Optional.
+ "next": "str", # Optional.
+ "prev": "str" # Optional.
+ }
+ },
+ "meta": {
+ "page": 0, # Optional.
+ "pages": 0, # Optional.
+ "total": 0 # Optional.
+ },
+ "resources": [
+ {
+ "id": "str", # Optional. Unique identifier for the
+ suppressed resource.
+ "resource_id": "str", # Optional. Unique identifier for the
+ resource suppressed.
+ "resource_type": "str", # Optional. Resource type for the
+ resource suppressed.
+ "rule_name": "str", # Optional. Human-readable rule name for
+ the suppressed rule.
+ "rule_uuid": "str" # Optional. Unique identifier for the
+ suppressed rule.
+ }
+ ]
}
- # response body for status code(s): 404, 422
+ # response body for status code(s): 400, 404
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
returned. For example, the ID for a response returning a 404 status code would
@@ -175572,24 +186563,14 @@ async def patch(
"""
@overload
- async def patch(
- self,
- byoip_prefix_uuid: str,
- body: IO[bytes],
- *,
- content_type: str = "application/json",
- **kwargs: Any
+ async def create_suppression(
+ self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
) -> JSON:
# pylint: disable=line-too-long
- """Update a BYOIP Prefix.
-
- To update a BYOIP prefix, send a PATCH request to ``/v2/byoip_prefixes/$byoip_prefix_uuid``.
+ """Create Suppression.
- Currently, you can update the advertisement status of the prefix.
- The response will include the updated details of the prefix.
+ To suppress scan findings, send a POST request to ``/v2/security/settings/suppressions``.
- :param byoip_prefix_uuid: A unique identifier for a BYOIP prefix. Required.
- :type byoip_prefix_uuid: str
:param body: Required.
:type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
@@ -175602,34 +186583,37 @@ async def patch(
Example:
.. code-block:: python
- # response body for status code(s): 202
+ # response body for status code(s): 201
response == {
- "byoip_prefix": {
- "advertised": bool, # Optional. Whether the BYOIP prefix is being
- advertised.
- "failure_reason": "str", # Optional. Reason for failure, if
- applicable.
- "locked": bool, # Optional. Whether the BYOIP prefix is locked.
- "name": "str", # Optional. Name of the BYOIP prefix.
- "prefix": "str", # Optional. The IP prefix in CIDR notation.
- "project_id": "str", # Optional. The ID of the project associated
- with the BYOIP prefix.
- "region": "str", # Optional. Region where the BYOIP prefix is
- located.
- "status": "str", # Optional. Status of the BYOIP prefix.
- "uuid": "str", # Optional. Unique identifier for the BYOIP prefix.
- "validations": [
- {
- "name": "str", # Optional. Name of the validation.
- "note": "str", # Optional. Additional notes or
- details about the validation.
- "status": "str" # Optional. Status of the
- validation.
- }
- ]
- }
+ "links": {
+ "pages": {
+ "first": "str", # Optional.
+ "last": "str", # Optional.
+ "next": "str", # Optional.
+ "prev": "str" # Optional.
+ }
+ },
+ "meta": {
+ "page": 0, # Optional.
+ "pages": 0, # Optional.
+ "total": 0 # Optional.
+ },
+ "resources": [
+ {
+ "id": "str", # Optional. Unique identifier for the
+ suppressed resource.
+ "resource_id": "str", # Optional. Unique identifier for the
+ resource suppressed.
+ "resource_type": "str", # Optional. Resource type for the
+ resource suppressed.
+ "rule_name": "str", # Optional. Human-readable rule name for
+ the suppressed rule.
+ "rule_uuid": "str" # Optional. Unique identifier for the
+ suppressed rule.
+ }
+ ]
}
- # response body for status code(s): 404, 422
+ # response body for status code(s): 400, 404
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
returned. For example, the ID for a response returning a 404 status code would
@@ -175643,19 +186627,14 @@ async def patch(
"""
@distributed_trace_async
- async def patch(
- self, byoip_prefix_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any
+ async def create_suppression(
+ self, body: Union[JSON, IO[bytes]], **kwargs: Any
) -> JSON:
# pylint: disable=line-too-long
- """Update a BYOIP Prefix.
-
- To update a BYOIP prefix, send a PATCH request to ``/v2/byoip_prefixes/$byoip_prefix_uuid``.
+ """Create Suppression.
- Currently, you can update the advertisement status of the prefix.
- The response will include the updated details of the prefix.
+ To suppress scan findings, send a POST request to ``/v2/security/settings/suppressions``.
- :param byoip_prefix_uuid: A unique identifier for a BYOIP prefix. Required.
- :type byoip_prefix_uuid: str
:param body: Is either a JSON type or a IO[bytes] type. Required.
:type body: JSON or IO[bytes]
:return: JSON object
@@ -175667,37 +186646,44 @@ async def patch(
# JSON input template you can fill out and use as your body input.
body = {
- "advertise": bool # Optional. Whether the BYOIP prefix should be advertised.
+ "resources": [
+ "str" # Optional. The URNs of resources to suppress for the rule.
+ ],
+ "rule_uuid": "str" # Optional. The rule UUID to suppress for the listed
+ resources.
}
- # response body for status code(s): 202
+ # response body for status code(s): 201
response == {
- "byoip_prefix": {
- "advertised": bool, # Optional. Whether the BYOIP prefix is being
- advertised.
- "failure_reason": "str", # Optional. Reason for failure, if
- applicable.
- "locked": bool, # Optional. Whether the BYOIP prefix is locked.
- "name": "str", # Optional. Name of the BYOIP prefix.
- "prefix": "str", # Optional. The IP prefix in CIDR notation.
- "project_id": "str", # Optional. The ID of the project associated
- with the BYOIP prefix.
- "region": "str", # Optional. Region where the BYOIP prefix is
- located.
- "status": "str", # Optional. Status of the BYOIP prefix.
- "uuid": "str", # Optional. Unique identifier for the BYOIP prefix.
- "validations": [
- {
- "name": "str", # Optional. Name of the validation.
- "note": "str", # Optional. Additional notes or
- details about the validation.
- "status": "str" # Optional. Status of the
- validation.
- }
- ]
- }
+ "links": {
+ "pages": {
+ "first": "str", # Optional.
+ "last": "str", # Optional.
+ "next": "str", # Optional.
+ "prev": "str" # Optional.
+ }
+ },
+ "meta": {
+ "page": 0, # Optional.
+ "pages": 0, # Optional.
+ "total": 0 # Optional.
+ },
+ "resources": [
+ {
+ "id": "str", # Optional. Unique identifier for the
+ suppressed resource.
+ "resource_id": "str", # Optional. Unique identifier for the
+ resource suppressed.
+ "resource_type": "str", # Optional. Resource type for the
+ resource suppressed.
+ "rule_name": "str", # Optional. Human-readable rule name for
+ the suppressed rule.
+ "rule_uuid": "str" # Optional. Unique identifier for the
+ suppressed rule.
+ }
+ ]
}
- # response body for status code(s): 404, 422
+ # response body for status code(s): 400, 404
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
returned. For example, the ID for a response returning a 404 status code would
@@ -175738,8 +186724,7 @@ async def patch(
else:
_json = body
- _request = build_byoip_prefixes_patch_request(
- byoip_prefix_uuid=byoip_prefix_uuid,
+ _request = build_security_create_suppression_request(
content_type=content_type,
json=_json,
content=_content,
@@ -175757,14 +186742,14 @@ async def patch(
response = pipeline_response.http_response
- if response.status_code not in [202, 404, 422]:
+ if response.status_code not in [201, 400, 404]:
if _stream:
await response.read() # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
raise HttpResponseError(response=response)
response_headers = {}
- if response.status_code == 202:
+ if response.status_code == 201:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
)
@@ -175780,7 +186765,7 @@ async def patch(
else:
deserialized = None
- if response.status_code == 404:
+ if response.status_code == 400:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
)
@@ -175796,7 +186781,7 @@ async def patch(
else:
deserialized = None
- if response.status_code == 422:
+ if response.status_code == 404:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
)
@@ -175818,57 +186803,24 @@ async def patch(
return cast(JSON, deserialized) # type: ignore
@distributed_trace_async
- async def list_resources(
- self,
- byoip_prefix_uuid: str,
- *,
- per_page: int = 20,
- page: int = 1,
- **kwargs: Any
- ) -> JSON:
+ async def delete_suppression(
+ self, suppression_uuid: str, **kwargs: Any
+ ) -> Optional[JSON]:
# pylint: disable=line-too-long
- """List BYOIP Prefix Resources.
+ """Delete Suppression.
- To list resources associated with BYOIP prefixes, send a GET request to
- ``/v2/byoip_prefixes/{byoip_prefix_uuid}/ips``.
-
- A successful response will return a list of resources associated with the specified BYOIP
- prefix.
+ To remove a suppression, send a DELETE request to
+ ``/v2/security/settings/suppressions/{suppression_uuid}``.
- :param byoip_prefix_uuid: The unique identifier for the BYOIP Prefix. Required.
- :type byoip_prefix_uuid: str
- :keyword per_page: Number of items returned per page. Default value is 20.
- :paramtype per_page: int
- :keyword page: Which 'page' of paginated results to return. Default value is 1.
- :paramtype page: int
- :return: JSON object
- :rtype: JSON
+ :param suppression_uuid: The suppression UUID to remove. Required.
+ :type suppression_uuid: str
+ :return: JSON object or None
+ :rtype: JSON or None
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
- # response body for status code(s): 200
- response == {
- "meta": {
- "total": 0 # Optional. Number of objects returned by the request.
- },
- "ips": [
- {
- "assigned_at": "2020-02-20 00:00:00", # Optional. Time when
- the allocation was assigned.
- "byoip": "str", # Optional. The BYOIP prefix UUID.
- "id": 0, # Optional. Unique identifier for the allocation.
- "region": "str", # Optional. Region where the allocation is
- made.
- "resource": "str" # Optional. The resource associated with
- the allocation.
- }
- ],
- "links": {
- "pages": {}
- }
- }
# response body for status code(s): 404
response == {
"id": "str", # A short identifier corresponding to the HTTP status code
@@ -175897,12 +186849,10 @@ async def list_resources(
_headers = kwargs.pop("headers", {}) or {}
_params = kwargs.pop("params", {}) or {}
- cls: ClsType[JSON] = kwargs.pop("cls", None)
+ cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None)
- _request = build_byoip_prefixes_list_resources_request(
- byoip_prefix_uuid=byoip_prefix_uuid,
- per_page=per_page,
- page=page,
+ _request = build_security_delete_suppression_request(
+ suppression_uuid=suppression_uuid,
headers=_headers,
params=_params,
)
@@ -175917,14 +186867,15 @@ async def list_resources(
response = pipeline_response.http_response
- if response.status_code not in [200, 404]:
+ if response.status_code not in [204, 404]:
if _stream:
await response.read() # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
raise HttpResponseError(response=response)
+ deserialized = None
response_headers = {}
- if response.status_code == 200:
+ if response.status_code == 204:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
)
@@ -175935,11 +186886,6 @@ async def list_resources(
"int", response.headers.get("ratelimit-reset")
)
- if response.content:
- deserialized = response.json()
- else:
- deserialized = None
-
if response.status_code == 404:
response_headers["ratelimit-limit"] = self._deserialize(
"int", response.headers.get("ratelimit-limit")
@@ -175957,9 +186903,9 @@ async def list_resources(
deserialized = None
if cls:
- return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- return cast(JSON, deserialized) # type: ignore
+ return deserialized # type: ignore
class SizesOperations:
diff --git a/src/pydo/operations/__init__.py b/src/pydo/operations/__init__.py
index 4a74b7c..0c8c048 100644
--- a/src/pydo/operations/__init__.py
+++ b/src/pydo/operations/__init__.py
@@ -17,12 +17,14 @@
from ._operations import InvoicesOperations
from ._operations import BillingInsightsOperations
from ._operations import DatabasesOperations
+from ._operations import DedicatedInferencesOperations
from ._operations import DomainsOperations
from ._operations import DropletsOperations
from ._operations import DropletActionsOperations
from ._operations import AutoscalepoolsOperations
from ._operations import FirewallsOperations
from ._operations import FunctionsOperations
+from ._operations import FunctionsAccessKeyOperations
from ._operations import ImagesOperations
from ._operations import ImageActionsOperations
from ._operations import KubernetesOperations
@@ -39,6 +41,7 @@
from ._operations import ReservedIPv6Operations
from ._operations import ReservedIPv6ActionsOperations
from ._operations import ByoipPrefixesOperations
+from ._operations import SecurityOperations
from ._operations import SizesOperations
from ._operations import SnapshotsOperations
from ._operations import SpacesKeyOperations
@@ -70,12 +73,14 @@
"InvoicesOperations",
"BillingInsightsOperations",
"DatabasesOperations",
+ "DedicatedInferencesOperations",
"DomainsOperations",
"DropletsOperations",
"DropletActionsOperations",
"AutoscalepoolsOperations",
"FirewallsOperations",
"FunctionsOperations",
+ "FunctionsAccessKeyOperations",
"ImagesOperations",
"ImageActionsOperations",
"KubernetesOperations",
@@ -92,6 +97,7 @@
"ReservedIPv6Operations",
"ReservedIPv6ActionsOperations",
"ByoipPrefixesOperations",
+ "SecurityOperations",
"SizesOperations",
"SnapshotsOperations",
"SpacesKeyOperations",
diff --git a/src/pydo/operations/_operations.py b/src/pydo/operations/_operations.py
index f06ed50..4795b9e 100644
--- a/src/pydo/operations/_operations.py
+++ b/src/pydo/operations/_operations.py
@@ -1124,6 +1124,129 @@ def build_apps_get_job_invocation_logs_request( # pylint: disable=name-too-long
)
+def build_apps_list_events_request(
+ app_id: str,
+ *,
+ page: int = 1,
+ per_page: int = 20,
+ event_types: Optional[List[str]] = None,
+ **kwargs: Any,
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = "/v2/apps/{app_id}/events"
+ path_format_arguments = {
+ "app_id": _SERIALIZER.url("app_id", app_id, "str"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ if page is not None:
+ _params["page"] = _SERIALIZER.query("page", page, "int", minimum=1)
+ if per_page is not None:
+ _params["per_page"] = _SERIALIZER.query(
+ "per_page", per_page, "int", maximum=200, minimum=1
+ )
+ if event_types is not None:
+ _params["event_types"] = _SERIALIZER.query("event_types", event_types, "[str]")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(
+ method="GET", url=_url, params=_params, headers=_headers, **kwargs
+ )
+
+
+def build_apps_get_event_request(
+ app_id: str, event_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = "/v2/apps/{app_id}/events/{event_id}"
+ path_format_arguments = {
+ "app_id": _SERIALIZER.url("app_id", app_id, "str"),
+ "event_id": _SERIALIZER.url("event_id", event_id, "str"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="GET", url=_url, headers=_headers, **kwargs)
+
+
+def build_apps_cancel_event_request(
+ app_id: str, event_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = "/v2/apps/{app_id}/events/{event_id}/cancel"
+ path_format_arguments = {
+ "app_id": _SERIALIZER.url("app_id", app_id, "str"),
+ "event_id": _SERIALIZER.url("event_id", event_id, "str"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="POST", url=_url, headers=_headers, **kwargs)
+
+
+def build_apps_get_event_logs_request(
+ app_id: str,
+ event_id: str,
+ *,
+ follow: Optional[bool] = None,
+ type: str = "UNSPECIFIED",
+ pod_connection_timeout: Optional[str] = None,
+ **kwargs: Any,
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = "/v2/apps/{app_id}/events/{event_id}/logs"
+ path_format_arguments = {
+ "app_id": _SERIALIZER.url("app_id", app_id, "str"),
+ "event_id": _SERIALIZER.url("event_id", event_id, "str"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ if follow is not None:
+ _params["follow"] = _SERIALIZER.query("follow", follow, "bool")
+ _params["type"] = _SERIALIZER.query("type", type, "str")
+ if pod_connection_timeout is not None:
+ _params["pod_connection_timeout"] = _SERIALIZER.query(
+ "pod_connection_timeout", pod_connection_timeout, "str"
+ )
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(
+ method="GET", url=_url, params=_params, headers=_headers, **kwargs
+ )
+
+
def build_apps_list_instance_sizes_request(**kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
@@ -3581,6 +3704,342 @@ def build_databases_delete_opensearch_index_request( # pylint: disable=name-too
return HttpRequest(method="DELETE", url=_url, headers=_headers, **kwargs)
def build_dedicated_inferences_get_request(
    dedicated_inference_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the request for ``GET /v2/dedicated-inferences/{dedicated_inference_id}``."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    accept_value = headers.pop("Accept", "application/json")

    # Serialize the path parameter and substitute it into the URL template.
    url = "/v2/dedicated-inferences/{dedicated_inference_id}".format(
        dedicated_inference_id=_SERIALIZER.url(
            "dedicated_inference_id", dedicated_inference_id, "str"
        ),
    )

    headers["Accept"] = _SERIALIZER.header("accept", accept_value, "str")
    return HttpRequest(method="GET", url=url, headers=headers, **kwargs)
+
+
def build_dedicated_inferences_patch_request(
    dedicated_inference_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the request for ``PATCH /v2/dedicated-inferences/{dedicated_inference_id}``."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    # The caller may pass the content type either as a kwarg or as a header.
    content_type: Optional[str] = kwargs.pop(
        "content_type", headers.pop("Content-Type", None)
    )
    accept_value = headers.pop("Accept", "application/json")

    url = "/v2/dedicated-inferences/{dedicated_inference_id}".format(
        dedicated_inference_id=_SERIALIZER.url(
            "dedicated_inference_id", dedicated_inference_id, "str"
        ),
    )

    if content_type is not None:
        headers["Content-Type"] = _SERIALIZER.header(
            "content_type", content_type, "str"
        )
    headers["Accept"] = _SERIALIZER.header("accept", accept_value, "str")
    return HttpRequest(method="PATCH", url=url, headers=headers, **kwargs)
+
+
def build_dedicated_inferences_delete_request(  # pylint: disable=name-too-long
    dedicated_inference_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the request for ``DELETE /v2/dedicated-inferences/{dedicated_inference_id}``."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    accept_value = headers.pop("Accept", "application/json")

    url = "/v2/dedicated-inferences/{dedicated_inference_id}".format(
        dedicated_inference_id=_SERIALIZER.url(
            "dedicated_inference_id", dedicated_inference_id, "str"
        ),
    )

    headers["Accept"] = _SERIALIZER.header("accept", accept_value, "str")
    return HttpRequest(method="DELETE", url=url, headers=headers, **kwargs)
+
+
def build_dedicated_inferences_list_request(
    *, per_page: int = 20, page: int = 1, region: Optional[str] = None, **kwargs: Any
) -> HttpRequest:
    """Build the request for ``GET /v2/dedicated-inferences``."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    accept_value = headers.pop("Accept", "application/json")

    url = "/v2/dedicated-inferences"

    # Pagination bounds are enforced by the serializer (1 <= per_page <= 200).
    if per_page is not None:
        params["per_page"] = _SERIALIZER.query(
            "per_page", per_page, "int", maximum=200, minimum=1
        )
    if page is not None:
        params["page"] = _SERIALIZER.query("page", page, "int", minimum=1)
    if region is not None:
        params["region"] = _SERIALIZER.query("region", region, "str")

    headers["Accept"] = _SERIALIZER.header("accept", accept_value, "str")
    return HttpRequest(
        method="GET", url=url, params=params, headers=headers, **kwargs
    )
+
+
def build_dedicated_inferences_create_request(  # pylint: disable=name-too-long
    **kwargs: Any,
) -> HttpRequest:
    """Build the request for ``POST /v2/dedicated-inferences``.

    The request body, if any, is supplied by the caller through ``kwargs``
    (e.g. ``json=``/``content=``) and forwarded to ``HttpRequest``.
    """
    # Fix: the ``name-too-long`` pragma previously sat on the return-annotation
    # line; pylint reports the message on the ``def`` line, so the disable was
    # ineffective there. Moved to the ``def`` line, matching sibling builders.
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})

    # Content type may arrive as a kwarg or as a pre-set header.
    content_type: Optional[str] = kwargs.pop(
        "content_type", _headers.pop("Content-Type", None)
    )
    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = "/v2/dedicated-inferences"

    # Construct headers
    if content_type is not None:
        _headers["Content-Type"] = _SERIALIZER.header(
            "content_type", content_type, "str"
        )
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="POST", url=_url, headers=_headers, **kwargs)
+
+
def build_dedicated_inferences_list_accelerators_request(  # pylint: disable=name-too-long
    dedicated_inference_id: str,
    *,
    per_page: int = 20,
    page: int = 1,
    slug: Optional[str] = None,
    **kwargs: Any,
) -> HttpRequest:
    """Build the request for ``GET /v2/dedicated-inferences/{id}/accelerators``."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    accept_value = headers.pop("Accept", "application/json")

    url = "/v2/dedicated-inferences/{dedicated_inference_id}/accelerators".format(
        dedicated_inference_id=_SERIALIZER.url(
            "dedicated_inference_id", dedicated_inference_id, "str"
        ),
    )

    # Pagination plus an optional slug filter.
    if per_page is not None:
        params["per_page"] = _SERIALIZER.query(
            "per_page", per_page, "int", maximum=200, minimum=1
        )
    if page is not None:
        params["page"] = _SERIALIZER.query("page", page, "int", minimum=1)
    if slug is not None:
        params["slug"] = _SERIALIZER.query("slug", slug, "str")

    headers["Accept"] = _SERIALIZER.header("accept", accept_value, "str")
    return HttpRequest(
        method="GET", url=url, params=params, headers=headers, **kwargs
    )
+
+
def build_dedicated_inferences_get_accelerator_request(  # pylint: disable=name-too-long
    dedicated_inference_id: str, accelerator_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the request for ``GET /v2/dedicated-inferences/{id}/accelerators/{accelerator_id}``."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    accept_value = headers.pop("Accept", "application/json")

    url = "/v2/dedicated-inferences/{dedicated_inference_id}/accelerators/{accelerator_id}".format(
        dedicated_inference_id=_SERIALIZER.url(
            "dedicated_inference_id", dedicated_inference_id, "str"
        ),
        accelerator_id=_SERIALIZER.url("accelerator_id", accelerator_id, "str"),
    )

    headers["Accept"] = _SERIALIZER.header("accept", accept_value, "str")
    return HttpRequest(method="GET", url=url, headers=headers, **kwargs)
+
+
def build_dedicated_inferences_get_ca_request(  # pylint: disable=name-too-long
    dedicated_inference_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the request for ``GET /v2/dedicated-inferences/{dedicated_inference_id}/ca``."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    accept_value = headers.pop("Accept", "application/json")

    url = "/v2/dedicated-inferences/{dedicated_inference_id}/ca".format(
        dedicated_inference_id=_SERIALIZER.url(
            "dedicated_inference_id", dedicated_inference_id, "str"
        ),
    )

    headers["Accept"] = _SERIALIZER.header("accept", accept_value, "str")
    return HttpRequest(method="GET", url=url, headers=headers, **kwargs)
+
+
def build_dedicated_inferences_list_tokens_request(  # pylint: disable=name-too-long
    dedicated_inference_id: str, *, per_page: int = 20, page: int = 1, **kwargs: Any
) -> HttpRequest:
    """Build the request for ``GET /v2/dedicated-inferences/{dedicated_inference_id}/tokens``."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    accept_value = headers.pop("Accept", "application/json")

    url = "/v2/dedicated-inferences/{dedicated_inference_id}/tokens".format(
        dedicated_inference_id=_SERIALIZER.url(
            "dedicated_inference_id", dedicated_inference_id, "str"
        ),
    )

    # Standard pagination query parameters.
    if per_page is not None:
        params["per_page"] = _SERIALIZER.query(
            "per_page", per_page, "int", maximum=200, minimum=1
        )
    if page is not None:
        params["page"] = _SERIALIZER.query("page", page, "int", minimum=1)

    headers["Accept"] = _SERIALIZER.header("accept", accept_value, "str")
    return HttpRequest(
        method="GET", url=url, params=params, headers=headers, **kwargs
    )
+
+
def build_dedicated_inferences_create_tokens_request(  # pylint: disable=name-too-long
    dedicated_inference_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the request for ``POST /v2/dedicated-inferences/{dedicated_inference_id}/tokens``."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    content_type: Optional[str] = kwargs.pop(
        "content_type", headers.pop("Content-Type", None)
    )
    accept_value = headers.pop("Accept", "application/json")

    url = "/v2/dedicated-inferences/{dedicated_inference_id}/tokens".format(
        dedicated_inference_id=_SERIALIZER.url(
            "dedicated_inference_id", dedicated_inference_id, "str"
        ),
    )

    if content_type is not None:
        headers["Content-Type"] = _SERIALIZER.header(
            "content_type", content_type, "str"
        )
    headers["Accept"] = _SERIALIZER.header("accept", accept_value, "str")
    return HttpRequest(method="POST", url=url, headers=headers, **kwargs)
+
+
def build_dedicated_inferences_delete_tokens_request(  # pylint: disable=name-too-long
    dedicated_inference_id: str, token_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the request for ``DELETE /v2/dedicated-inferences/{id}/tokens/{token_id}``."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    accept_value = headers.pop("Accept", "application/json")

    url = "/v2/dedicated-inferences/{dedicated_inference_id}/tokens/{token_id}".format(
        dedicated_inference_id=_SERIALIZER.url(
            "dedicated_inference_id", dedicated_inference_id, "str"
        ),
        token_id=_SERIALIZER.url("token_id", token_id, "str"),
    )

    headers["Accept"] = _SERIALIZER.header("accept", accept_value, "str")
    return HttpRequest(method="DELETE", url=url, headers=headers, **kwargs)
+
+
def build_dedicated_inferences_list_sizes_request(  # pylint: disable=name-too-long
    **kwargs: Any,
) -> HttpRequest:
    """Build the request for ``GET /v2/dedicated-inferences/sizes``."""
    # Fix: the ``name-too-long`` pragma previously sat on the return-annotation
    # line; pylint reports the message on the ``def`` line, so the disable was
    # ineffective there. Moved to the ``def`` line, matching sibling builders.
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})

    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = "/v2/dedicated-inferences/sizes"

    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=_url, headers=_headers, **kwargs)
+
+
def build_dedicated_inferences_get_gpu_model_config_request(  # pylint: disable=name-too-long
    **kwargs: Any,
) -> HttpRequest:
    """Build the request for ``GET /v2/dedicated-inferences/gpu-model-config``."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    accept_value = headers.pop("Accept", "application/json")

    # Fixed URL — no path or query parameters for this endpoint.
    url = "/v2/dedicated-inferences/gpu-model-config"

    headers["Accept"] = _SERIALIZER.header("accept", accept_value, "str")
    return HttpRequest(method="GET", url=url, headers=headers, **kwargs)
+
+
def build_domains_list_request(
*, per_page: int = 20, page: int = 1, **kwargs: Any
) -> HttpRequest:
@@ -5108,6 +5567,106 @@ def build_functions_delete_trigger_request(
return HttpRequest(method="DELETE", url=_url, headers=_headers, **kwargs)
def build_functions_access_key_list_request(
    namespace_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the request for ``GET /v2/functions/namespaces/{namespace_id}/keys``."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    accept_value = headers.pop("Accept", "application/json")

    url = "/v2/functions/namespaces/{namespace_id}/keys".format(
        namespace_id=_SERIALIZER.url("namespace_id", namespace_id, "str"),
    )

    headers["Accept"] = _SERIALIZER.header("accept", accept_value, "str")
    return HttpRequest(method="GET", url=url, headers=headers, **kwargs)
+
+
def build_functions_access_key_create_request(  # pylint: disable=name-too-long
    namespace_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the request for ``POST /v2/functions/namespaces/{namespace_id}/keys``."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    content_type: Optional[str] = kwargs.pop(
        "content_type", headers.pop("Content-Type", None)
    )
    accept_value = headers.pop("Accept", "application/json")

    url = "/v2/functions/namespaces/{namespace_id}/keys".format(
        namespace_id=_SERIALIZER.url("namespace_id", namespace_id, "str"),
    )

    if content_type is not None:
        headers["Content-Type"] = _SERIALIZER.header(
            "content_type", content_type, "str"
        )
    headers["Accept"] = _SERIALIZER.header("accept", accept_value, "str")
    return HttpRequest(method="POST", url=url, headers=headers, **kwargs)
+
+
def build_functions_access_key_update_request(  # pylint: disable=name-too-long
    namespace_id: str, key_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the request for ``PUT /v2/functions/namespaces/{namespace_id}/keys/{key_id}``."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    content_type: Optional[str] = kwargs.pop(
        "content_type", headers.pop("Content-Type", None)
    )
    accept_value = headers.pop("Accept", "application/json")

    url = "/v2/functions/namespaces/{namespace_id}/keys/{key_id}".format(
        namespace_id=_SERIALIZER.url("namespace_id", namespace_id, "str"),
        key_id=_SERIALIZER.url("key_id", key_id, "str"),
    )

    if content_type is not None:
        headers["Content-Type"] = _SERIALIZER.header(
            "content_type", content_type, "str"
        )
    headers["Accept"] = _SERIALIZER.header("accept", accept_value, "str")
    return HttpRequest(method="PUT", url=url, headers=headers, **kwargs)
+
+
def build_functions_access_key_delete_request(  # pylint: disable=name-too-long
    namespace_id: str, key_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the request for ``DELETE /v2/functions/namespaces/{namespace_id}/keys/{key_id}``."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    accept_value = headers.pop("Accept", "application/json")

    url = "/v2/functions/namespaces/{namespace_id}/keys/{key_id}".format(
        namespace_id=_SERIALIZER.url("namespace_id", namespace_id, "str"),
        key_id=_SERIALIZER.url("key_id", key_id, "str"),
    )

    headers["Accept"] = _SERIALIZER.header("accept", accept_value, "str")
    return HttpRequest(method="DELETE", url=url, headers=headers, **kwargs)
+
+
def build_images_list_request(
*,
type: Optional[str] = None,
@@ -10078,6 +10637,275 @@ def build_byoip_prefixes_list_resources_request( # pylint: disable=name-too-lon
)
def build_security_list_scans_request(
    *, per_page: int = 20, page: int = 1, **kwargs: Any
) -> HttpRequest:
    """Build the request for ``GET /v2/security/scans``."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    accept_value = headers.pop("Accept", "application/json")

    url = "/v2/security/scans"

    # Standard pagination query parameters.
    if per_page is not None:
        params["per_page"] = _SERIALIZER.query(
            "per_page", per_page, "int", maximum=200, minimum=1
        )
    if page is not None:
        params["page"] = _SERIALIZER.query("page", page, "int", minimum=1)

    headers["Accept"] = _SERIALIZER.header("accept", accept_value, "str")
    return HttpRequest(
        method="GET", url=url, params=params, headers=headers, **kwargs
    )
+
+
def build_security_create_scan_request(**kwargs: Any) -> HttpRequest:
    """Build the request for ``POST /v2/security/scans`` (no request body)."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    accept_value = headers.pop("Accept", "application/json")

    # Fixed URL — this endpoint takes no path or query parameters.
    url = "/v2/security/scans"

    headers["Accept"] = _SERIALIZER.header("accept", accept_value, "str")
    return HttpRequest(method="POST", url=url, headers=headers, **kwargs)
+
+
def build_security_get_scan_request(
    scan_id: str,
    *,
    severity: Optional[str] = None,
    per_page: int = 20,
    page: int = 1,
    type: Optional[str] = None,
    **kwargs: Any,
) -> HttpRequest:
    """Build the request for ``GET /v2/security/scans/{scan_id}``."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    accept_value = headers.pop("Accept", "application/json")

    url = "/v2/security/scans/{scan_id}".format(
        scan_id=_SERIALIZER.url("scan_id", scan_id, "str"),
    )

    # Only non-None filters are serialized onto the query string.
    if severity is not None:
        params["severity"] = _SERIALIZER.query("severity", severity, "str")
    if per_page is not None:
        params["per_page"] = _SERIALIZER.query(
            "per_page", per_page, "int", maximum=200, minimum=1
        )
    if page is not None:
        params["page"] = _SERIALIZER.query("page", page, "int", minimum=1)
    if type is not None:
        params["type"] = _SERIALIZER.query("type", type, "str")

    headers["Accept"] = _SERIALIZER.header("accept", accept_value, "str")
    return HttpRequest(
        method="GET", url=url, params=params, headers=headers, **kwargs
    )
+
+
def build_security_get_latest_scan_request(
    *,
    per_page: int = 20,
    page: int = 1,
    severity: Optional[str] = None,
    type: Optional[str] = None,
    **kwargs: Any,
) -> HttpRequest:
    """Build the request for ``GET /v2/security/scans/latest``."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    accept_value = headers.pop("Accept", "application/json")

    url = "/v2/security/scans/latest"

    # Pagination plus optional severity/type filters.
    if per_page is not None:
        params["per_page"] = _SERIALIZER.query(
            "per_page", per_page, "int", maximum=200, minimum=1
        )
    if page is not None:
        params["page"] = _SERIALIZER.query("page", page, "int", minimum=1)
    if severity is not None:
        params["severity"] = _SERIALIZER.query("severity", severity, "str")
    if type is not None:
        params["type"] = _SERIALIZER.query("type", type, "str")

    headers["Accept"] = _SERIALIZER.header("accept", accept_value, "str")
    return HttpRequest(
        method="GET", url=url, params=params, headers=headers, **kwargs
    )
+
+
def build_security_create_scan_rule_request(**kwargs: Any) -> HttpRequest:
    """Build the request for ``POST /v2/security/scans/rules``."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    content_type: Optional[str] = kwargs.pop(
        "content_type", headers.pop("Content-Type", None)
    )
    accept_value = headers.pop("Accept", "application/json")

    url = "/v2/security/scans/rules"

    if content_type is not None:
        headers["Content-Type"] = _SERIALIZER.header(
            "content_type", content_type, "str"
        )
    headers["Accept"] = _SERIALIZER.header("accept", accept_value, "str")
    return HttpRequest(method="POST", url=url, headers=headers, **kwargs)
+
+
def build_security_list_scan_finding_affected_resources_request(  # pylint: disable=name-too-long
    scan_id: str, finding_uuid: str, *, per_page: int = 20, page: int = 1, **kwargs: Any
) -> HttpRequest:
    """Build the request for ``GET /v2/security/scans/{scan_id}/findings/{finding_uuid}/affected_resources``."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    accept_value = headers.pop("Accept", "application/json")

    url = "/v2/security/scans/{scan_id}/findings/{finding_uuid}/affected_resources".format(
        scan_id=_SERIALIZER.url("scan_id", scan_id, "str"),
        finding_uuid=_SERIALIZER.url("finding_uuid", finding_uuid, "str"),
    )

    # Standard pagination query parameters.
    if per_page is not None:
        params["per_page"] = _SERIALIZER.query(
            "per_page", per_page, "int", maximum=200, minimum=1
        )
    if page is not None:
        params["page"] = _SERIALIZER.query("page", page, "int", minimum=1)

    headers["Accept"] = _SERIALIZER.header("accept", accept_value, "str")
    return HttpRequest(
        method="GET", url=url, params=params, headers=headers, **kwargs
    )
+
+
def build_security_list_settings_request(
    *, per_page: int = 20, page: int = 1, **kwargs: Any
) -> HttpRequest:
    """Build the request for ``GET /v2/security/settings``."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    accept_value = headers.pop("Accept", "application/json")

    url = "/v2/security/settings"

    # Standard pagination query parameters.
    if per_page is not None:
        params["per_page"] = _SERIALIZER.query(
            "per_page", per_page, "int", maximum=200, minimum=1
        )
    if page is not None:
        params["page"] = _SERIALIZER.query("page", page, "int", minimum=1)

    headers["Accept"] = _SERIALIZER.header("accept", accept_value, "str")
    return HttpRequest(
        method="GET", url=url, params=params, headers=headers, **kwargs
    )
+
+
def build_security_update_settings_plan_request(  # pylint: disable=name-too-long
    **kwargs: Any,
) -> HttpRequest:
    """Build the request for ``PUT /v2/security/settings/plan``.

    The request body, if any, is supplied by the caller through ``kwargs``
    and forwarded to ``HttpRequest``.
    """
    # Fix: the ``name-too-long`` pragma previously sat on the return-annotation
    # line; pylint reports the message on the ``def`` line, so the disable was
    # ineffective there. Moved to the ``def`` line, matching sibling builders.
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})

    # Content type may arrive as a kwarg or as a pre-set header.
    content_type: Optional[str] = kwargs.pop(
        "content_type", _headers.pop("Content-Type", None)
    )
    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = "/v2/security/settings/plan"

    # Construct headers
    if content_type is not None:
        _headers["Content-Type"] = _SERIALIZER.header(
            "content_type", content_type, "str"
        )
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="PUT", url=_url, headers=_headers, **kwargs)
+
+
def build_security_create_suppression_request(  # pylint: disable=name-too-long
    **kwargs: Any,
) -> HttpRequest:
    """Build the request for ``POST /v2/security/settings/suppressions``.

    The request body, if any, is supplied by the caller through ``kwargs``
    and forwarded to ``HttpRequest``.
    """
    # Fix: the ``name-too-long`` pragma previously sat on the return-annotation
    # line; pylint reports the message on the ``def`` line, so the disable was
    # ineffective there. Moved to the ``def`` line, matching sibling builders.
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})

    # Content type may arrive as a kwarg or as a pre-set header.
    content_type: Optional[str] = kwargs.pop(
        "content_type", _headers.pop("Content-Type", None)
    )
    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = "/v2/security/settings/suppressions"

    # Construct headers
    if content_type is not None:
        _headers["Content-Type"] = _SERIALIZER.header(
            "content_type", content_type, "str"
        )
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="POST", url=_url, headers=_headers, **kwargs)
+
+
def build_security_delete_suppression_request(  # pylint: disable=name-too-long
    suppression_uuid: str, **kwargs: Any
) -> HttpRequest:
    """Build the request for ``DELETE /v2/security/settings/suppressions/{suppression_uuid}``."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    accept_value = headers.pop("Accept", "application/json")

    url = "/v2/security/settings/suppressions/{suppression_uuid}".format(
        suppression_uuid=_SERIALIZER.url(
            "suppression_uuid", suppression_uuid, "str"
        ),
    )

    headers["Accept"] = _SERIALIZER.header("accept", accept_value, "str")
    return HttpRequest(method="DELETE", url=url, headers=headers, **kwargs)
+
+
def build_sizes_list_request(
*, per_page: int = 20, page: int = 1, **kwargs: Any
) -> HttpRequest:
@@ -88909,8 +89737,10 @@ def get_logs_active_deployment(
* BUILD: Build-time logs
* DEPLOY: Deploy-time logs
* RUN: Live run-time logs
- * RUN_RESTARTED: Logs of crashed/restarted instances during runtime. Known values are:
- "UNSPECIFIED", "BUILD", "DEPLOY", "RUN", and "RUN_RESTARTED". Default value is "UNSPECIFIED".
+ * RUN_RESTARTED: Logs of crashed/restarted instances during runtime
+ * AUTOSCALE_EVENT: Logs of an autoscaling event (requires event_id). Known values are:
+ "UNSPECIFIED", "BUILD", "DEPLOY", "RUN", "RUN_RESTARTED", and "AUTOSCALE_EVENT". Default value
+ is "UNSPECIFIED".
:paramtype type: str
:keyword pod_connection_timeout: An optional time duration to wait if the underlying component
instance is not immediately available. Default: ``3m``. Default value is None.
@@ -98753,8 +99583,10 @@ def get_logs(
* BUILD: Build-time logs
* DEPLOY: Deploy-time logs
* RUN: Live run-time logs
- * RUN_RESTARTED: Logs of crashed/restarted instances during runtime. Known values are:
- "UNSPECIFIED", "BUILD", "DEPLOY", "RUN", and "RUN_RESTARTED". Default value is "UNSPECIFIED".
+ * RUN_RESTARTED: Logs of crashed/restarted instances during runtime
+ * AUTOSCALE_EVENT: Logs of an autoscaling event (requires event_id). Known values are:
+ "UNSPECIFIED", "BUILD", "DEPLOY", "RUN", "RUN_RESTARTED", and "AUTOSCALE_EVENT". Default value
+ is "UNSPECIFIED".
:paramtype type: str
:keyword pod_connection_timeout: An optional time duration to wait if the underlying component
instance is not immediately available. Default: ``3m``. Default value is None.
@@ -98901,8 +99733,10 @@ def get_logs_aggregate(
* BUILD: Build-time logs
* DEPLOY: Deploy-time logs
* RUN: Live run-time logs
- * RUN_RESTARTED: Logs of crashed/restarted instances during runtime. Known values are:
- "UNSPECIFIED", "BUILD", "DEPLOY", "RUN", and "RUN_RESTARTED". Default value is "UNSPECIFIED".
+ * RUN_RESTARTED: Logs of crashed/restarted instances during runtime
+ * AUTOSCALE_EVENT: Logs of an autoscaling event (requires event_id). Known values are:
+ "UNSPECIFIED", "BUILD", "DEPLOY", "RUN", "RUN_RESTARTED", and "AUTOSCALE_EVENT". Default value
+ is "UNSPECIFIED".
:paramtype type: str
:keyword pod_connection_timeout: An optional time duration to wait if the underlying component
instance is not immediately available. Default: ``3m``. Default value is None.
@@ -99179,8 +100013,10 @@ def get_logs_active_deployment_aggregate(
* BUILD: Build-time logs
* DEPLOY: Deploy-time logs
* RUN: Live run-time logs
- * RUN_RESTARTED: Logs of crashed/restarted instances during runtime. Known values are:
- "UNSPECIFIED", "BUILD", "DEPLOY", "RUN", and "RUN_RESTARTED". Default value is "UNSPECIFIED".
+ * RUN_RESTARTED: Logs of crashed/restarted instances during runtime
+ * AUTOSCALE_EVENT: Logs of an autoscaling event (requires event_id). Known values are:
+ "UNSPECIFIED", "BUILD", "DEPLOY", "RUN", "RUN_RESTARTED", and "AUTOSCALE_EVENT". Default value
+ is "UNSPECIFIED".
:paramtype type: str
:keyword pod_connection_timeout: An optional time duration to wait if the underlying component
instance is not immediately available. Default: ``3m``. Default value is None.
@@ -99940,260 +100776,28 @@ def get_job_invocation_logs(
return cast(JSON, deserialized) # type: ignore
@distributed_trace
- def list_instance_sizes(self, **kwargs: Any) -> JSON:
- # pylint: disable=line-too-long
- """List Instance Sizes.
-
- List all instance sizes for ``service``\\ , ``worker``\\ , and ``job`` components.
-
- :return: JSON object
- :rtype: JSON
- :raises ~azure.core.exceptions.HttpResponseError:
-
- Example:
- .. code-block:: python
-
- # response body for status code(s): 200
- response == {
- "discount_percent": 0.0, # Optional.
- "instance_sizes": [
- {
- "bandwidth_allowance_gib": "str", # Optional. The bandwidth
- allowance in GiB for the instance size.
- "cpu_type": "UNSPECIFIED", # Optional. Default value is
- "UNSPECIFIED". * SHARED: Shared vCPU cores * DEDICATED: Dedicated vCPU
- cores. Known values are: "UNSPECIFIED", "SHARED", and "DEDICATED".
- "cpus": "str", # Optional. The number of allotted vCPU
- cores.
- "deprecation_intent": bool, # Optional. Indicates if the
- instance size is intended for deprecation.
- "memory_bytes": "str", # Optional. The allotted memory in
- bytes.
- "name": "str", # Optional. A human-readable name of the
- instance size.
- "scalable": bool, # Optional. Indicates if the instance size
- can enable autoscaling.
- "single_instance_only": bool, # Optional. Indicates if the
- instance size allows more than one instance.
- "slug": "str", # Optional. The slug of the instance size.
- "tier_downgrade_to": "str", # Optional. The slug of the
- corresponding downgradable instance size on the lower tier.
- "tier_slug": "str", # Optional. The slug of the tier to
- which this instance size belongs.
- "tier_upgrade_to": "str", # Optional. The slug of the
- corresponding upgradable instance size on the higher tier.
- "usd_per_month": "str", # Optional. The cost of this
- instance size in USD per month.
- "usd_per_second": "str" # Optional. The cost of this
- instance size in USD per second.
- }
- ]
- }
- """
- error_map: MutableMapping[int, Type[HttpResponseError]] = {
- 404: ResourceNotFoundError,
- 409: ResourceExistsError,
- 304: ResourceNotModifiedError,
- 401: cast(
- Type[HttpResponseError],
- lambda response: ClientAuthenticationError(response=response),
- ),
- 429: HttpResponseError,
- 500: HttpResponseError,
- }
- error_map.update(kwargs.pop("error_map", {}) or {})
-
- _headers = kwargs.pop("headers", {}) or {}
- _params = kwargs.pop("params", {}) or {}
-
- cls: ClsType[JSON] = kwargs.pop("cls", None)
-
- _request = build_apps_list_instance_sizes_request(
- headers=_headers,
- params=_params,
- )
- _request.url = self._client.format_url(_request.url)
-
- _stream = False
- pipeline_response: PipelineResponse = (
- self._client._pipeline.run( # pylint: disable=protected-access
- _request, stream=_stream, **kwargs
- )
- )
-
- response = pipeline_response.http_response
-
- if response.status_code not in [200]:
- if _stream:
- response.read() # Load the body in memory and close the socket
- map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
- raise HttpResponseError(response=response)
-
- response_headers = {}
- response_headers["ratelimit-limit"] = self._deserialize(
- "int", response.headers.get("ratelimit-limit")
- )
- response_headers["ratelimit-remaining"] = self._deserialize(
- "int", response.headers.get("ratelimit-remaining")
- )
- response_headers["ratelimit-reset"] = self._deserialize(
- "int", response.headers.get("ratelimit-reset")
- )
-
- if response.content:
- deserialized = response.json()
- else:
- deserialized = None
-
- if cls:
- return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
-
- return cast(JSON, deserialized) # type: ignore
-
- @distributed_trace
- def get_instance_size(self, slug: str, **kwargs: Any) -> JSON:
+ def list_events(
+ self,
+ app_id: str,
+ *,
+ page: int = 1,
+ per_page: int = 20,
+ event_types: Optional[List[str]] = None,
+ **kwargs: Any,
+ ) -> JSON:
# pylint: disable=line-too-long
- """Retrieve an Instance Size.
-
- Retrieve information about a specific instance size for ``service``\\ , ``worker``\\ , and
- ``job`` components.
-
- :param slug: The slug of the instance size. Required.
- :type slug: str
- :return: JSON object
- :rtype: JSON
- :raises ~azure.core.exceptions.HttpResponseError:
-
- Example:
- .. code-block:: python
-
- # response body for status code(s): 200
- response == {
- "instance_size": {
- "bandwidth_allowance_gib": "str", # Optional. The bandwidth
- allowance in GiB for the instance size.
- "cpu_type": "UNSPECIFIED", # Optional. Default value is
- "UNSPECIFIED". * SHARED: Shared vCPU cores * DEDICATED: Dedicated vCPU
- cores. Known values are: "UNSPECIFIED", "SHARED", and "DEDICATED".
- "cpus": "str", # Optional. The number of allotted vCPU cores.
- "deprecation_intent": bool, # Optional. Indicates if the instance
- size is intended for deprecation.
- "memory_bytes": "str", # Optional. The allotted memory in bytes.
- "name": "str", # Optional. A human-readable name of the instance
- size.
- "scalable": bool, # Optional. Indicates if the instance size can
- enable autoscaling.
- "single_instance_only": bool, # Optional. Indicates if the instance
- size allows more than one instance.
- "slug": "str", # Optional. The slug of the instance size.
- "tier_downgrade_to": "str", # Optional. The slug of the
- corresponding downgradable instance size on the lower tier.
- "tier_slug": "str", # Optional. The slug of the tier to which this
- instance size belongs.
- "tier_upgrade_to": "str", # Optional. The slug of the corresponding
- upgradable instance size on the higher tier.
- "usd_per_month": "str", # Optional. The cost of this instance size
- in USD per month.
- "usd_per_second": "str" # Optional. The cost of this instance size
- in USD per second.
- }
- }
- # response body for status code(s): 404
- response == {
- "id": "str", # A short identifier corresponding to the HTTP status code
- returned. For example, the ID for a response returning a 404 status code would
- be "not_found.". Required.
- "message": "str", # A message providing additional information about the
- error, including details to help resolve it when possible. Required.
- "request_id": "str" # Optional. Optionally, some endpoints may include a
- request ID that should be provided when reporting bugs or opening support
- tickets to help identify the issue.
- }
- """
- error_map: MutableMapping[int, Type[HttpResponseError]] = {
- 404: ResourceNotFoundError,
- 409: ResourceExistsError,
- 304: ResourceNotModifiedError,
- 401: cast(
- Type[HttpResponseError],
- lambda response: ClientAuthenticationError(response=response),
- ),
- 429: HttpResponseError,
- 500: HttpResponseError,
- }
- error_map.update(kwargs.pop("error_map", {}) or {})
-
- _headers = kwargs.pop("headers", {}) or {}
- _params = kwargs.pop("params", {}) or {}
-
- cls: ClsType[JSON] = kwargs.pop("cls", None)
-
- _request = build_apps_get_instance_size_request(
- slug=slug,
- headers=_headers,
- params=_params,
- )
- _request.url = self._client.format_url(_request.url)
-
- _stream = False
- pipeline_response: PipelineResponse = (
- self._client._pipeline.run( # pylint: disable=protected-access
- _request, stream=_stream, **kwargs
- )
- )
-
- response = pipeline_response.http_response
-
- if response.status_code not in [200, 404]:
- if _stream:
- response.read() # Load the body in memory and close the socket
- map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
- raise HttpResponseError(response=response)
-
- response_headers = {}
- if response.status_code == 200:
- response_headers["ratelimit-limit"] = self._deserialize(
- "int", response.headers.get("ratelimit-limit")
- )
- response_headers["ratelimit-remaining"] = self._deserialize(
- "int", response.headers.get("ratelimit-remaining")
- )
- response_headers["ratelimit-reset"] = self._deserialize(
- "int", response.headers.get("ratelimit-reset")
- )
-
- if response.content:
- deserialized = response.json()
- else:
- deserialized = None
-
- if response.status_code == 404:
- response_headers["ratelimit-limit"] = self._deserialize(
- "int", response.headers.get("ratelimit-limit")
- )
- response_headers["ratelimit-remaining"] = self._deserialize(
- "int", response.headers.get("ratelimit-remaining")
- )
- response_headers["ratelimit-reset"] = self._deserialize(
- "int", response.headers.get("ratelimit-reset")
- )
-
- if response.content:
- deserialized = response.json()
- else:
- deserialized = None
-
- if cls:
- return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
-
- return cast(JSON, deserialized) # type: ignore
-
- @distributed_trace
- def list_regions(self, **kwargs: Any) -> JSON:
- """List App Regions.
+ """List App Events.
- List all regions supported by App Platform.
+ List all events for an app, including deployments and autoscaling events.
+ :param app_id: The app ID. Required.
+ :type app_id: str
+ :keyword page: Which 'page' of paginated results to return. Default value is 1.
+ :paramtype page: int
+ :keyword per_page: Number of items returned per page. Default value is 20.
+ :paramtype per_page: int
+ :keyword event_types: Filter events by event type. Default value is None.
+ :paramtype event_types: list[str]
:return: JSON object
:rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
@@ -100203,2699 +100807,3450 @@ def list_regions(self, **kwargs: Any) -> JSON:
# response body for status code(s): 200
response == {
- "regions": [
+ "events": [
{
- "continent": "str", # Optional. The continent that this
- region is in.
- "data_centers": [
- "str" # Optional. Data centers that are in this
- region.
- ],
- "default": bool, # Optional. Whether or not the region is
- presented as the default.
- "disabled": bool, # Optional. Whether or not the region is
- open for new apps.
- "flag": "str", # Optional. The flag of this region.
- "label": "str", # Optional. A human-readable name of the
- region.
- "reason": "str", # Optional. Reason that this region is not
- available.
- "slug": "str" # Optional. The slug form of the region name.
- }
- ]
- }
- """
- error_map: MutableMapping[int, Type[HttpResponseError]] = {
- 404: ResourceNotFoundError,
- 409: ResourceExistsError,
- 304: ResourceNotModifiedError,
- 401: cast(
- Type[HttpResponseError],
- lambda response: ClientAuthenticationError(response=response),
- ),
- 429: HttpResponseError,
- 500: HttpResponseError,
- }
- error_map.update(kwargs.pop("error_map", {}) or {})
-
- _headers = kwargs.pop("headers", {}) or {}
- _params = kwargs.pop("params", {}) or {}
-
- cls: ClsType[JSON] = kwargs.pop("cls", None)
-
- _request = build_apps_list_regions_request(
- headers=_headers,
- params=_params,
- )
- _request.url = self._client.format_url(_request.url)
-
- _stream = False
- pipeline_response: PipelineResponse = (
- self._client._pipeline.run( # pylint: disable=protected-access
- _request, stream=_stream, **kwargs
- )
- )
-
- response = pipeline_response.http_response
-
- if response.status_code not in [200]:
- if _stream:
- response.read() # Load the body in memory and close the socket
- map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
- raise HttpResponseError(response=response)
-
- response_headers = {}
- response_headers["ratelimit-limit"] = self._deserialize(
- "int", response.headers.get("ratelimit-limit")
- )
- response_headers["ratelimit-remaining"] = self._deserialize(
- "int", response.headers.get("ratelimit-remaining")
- )
- response_headers["ratelimit-reset"] = self._deserialize(
- "int", response.headers.get("ratelimit-reset")
- )
-
- if response.content:
- deserialized = response.json()
- else:
- deserialized = None
-
- if cls:
- return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
-
- return cast(JSON, deserialized) # type: ignore
-
- @overload
- def validate_app_spec(
- self, body: JSON, *, content_type: str = "application/json", **kwargs: Any
- ) -> JSON:
- # pylint: disable=line-too-long
- """Propose an App Spec.
-
- To propose and validate a spec for a new or existing app, send a POST request to the
- ``/v2/apps/propose`` endpoint. The request returns some information about the proposed app,
- including app cost and upgrade cost. If an existing app ID is specified, the app spec is
- treated as a proposed update to the existing app.
-
- :param body: Required.
- :type body: JSON
- :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
- Default value is "application/json".
- :paramtype content_type: str
- :return: JSON object
- :rtype: JSON
- :raises ~azure.core.exceptions.HttpResponseError:
-
- Example:
- .. code-block:: python
-
- # JSON input template you can fill out and use as your body input.
- body = {
- "spec": {
- "name": "str", # The name of the app. Must be unique across all apps
- in the same account. Required.
- "databases": [
- {
- "name": "str", # The database's name. The name must
- be unique across all components within the same app and cannot use
- capital letters. Required.
- "cluster_name": "str", # Optional. The name of the
- underlying DigitalOcean DBaaS cluster. This is required for
- production databases. For dev databases, if cluster_name is not set,
- a new cluster will be provisioned.
- "db_name": "str", # Optional. The name of the MySQL
- or PostgreSQL database to configure.
- "db_user": "str", # Optional. The name of the MySQL
- or PostgreSQL user to configure.
- "engine": "UNSET", # Optional. Default value is
- "UNSET". * MYSQL: MySQL * PG: PostgreSQL * REDIS: Caching * MONGODB:
- MongoDB * KAFKA: Kafka * OPENSEARCH: OpenSearch * VALKEY: ValKey.
- Known values are: "UNSET", "MYSQL", "PG", "REDIS", "MONGODB",
- "KAFKA", "OPENSEARCH", and "VALKEY".
- "production": bool, # Optional. Whether this is a
- production or dev database.
- "version": "str" # Optional. The version of the
- database engine.
- }
- ],
- "disable_edge_cache": False, # Optional. Default value is False. ..
- role:: raw-html-m2r(raw) :format: html If set to ``true``"" , the app
- will **not** be cached at the edge (CDN). Enable this option if you want to
- manage CDN configuration yourself"u2014whether by using an external CDN
- provider or by handling static content and caching within your app. This
- setting is also recommended for apps that require real-time data or serve
- dynamic content, such as those using Server-Sent Events (SSE) over GET, or
- hosting an MCP (Model Context Protocol) Server that utilizes SSE.""
- :raw-html-m2r:`
` **Note:** This feature is not available for static site
- components."" :raw-html-m2r:`
` For more information, see `Disable CDN
- Cache
- `_.
- "disable_email_obfuscation": False, # Optional. Default value is
- False. If set to ``true``"" , email addresses in the app will not be
- obfuscated. This is useful for apps that require email addresses to be
- visible (in the HTML markup).
- "domains": [
- {
- "domain": "str", # The hostname for the domain.
- Required.
- "minimum_tls_version": "str", # Optional. The
- minimum version of TLS a client application can use to access
- resources for the domain. Must be one of the following values
- wrapped within quotations: ``"1.2"`` or ``"1.3"``. Known values are:
- "1.2" and "1.3".
- "type": "UNSPECIFIED", # Optional. Default value is
- "UNSPECIFIED". * DEFAULT: The default ``.ondigitalocean.app`` domain
- assigned to this app * PRIMARY: The primary domain for this app that
- is displayed as the default in the control panel, used in bindable
- environment variables, and any other places that reference an app's
- live URL. Only one domain may be set as primary. * ALIAS: A
- non-primary domain. Known values are: "UNSPECIFIED", "DEFAULT",
- "PRIMARY", and "ALIAS".
- "wildcard": bool, # Optional. Indicates whether the
- domain includes all sub-domains, in addition to the given domain.
- "zone": "str" # Optional. Optional. If the domain
- uses DigitalOcean DNS and you would like App Platform to
- automatically manage it for you, set this to the name of the domain
- on your account. For example, If the domain you are adding is
- ``app.domain.com``"" , the zone could be ``domain.com``.
- }
- ],
- "egress": {
- "type": "AUTOASSIGN" # Optional. Default value is
- "AUTOASSIGN". The app egress type. Known values are: "AUTOASSIGN" and
- "DEDICATED_IP".
- },
- "enhanced_threat_control_enabled": False, # Optional. Default value
- is False. If set to ``true``"" , suspicious requests will go through
- additional security checks to help mitigate layer 7 DDoS attacks.
- "functions": [
- {
- "name": "str", # The name. Must be unique across all
- components within the same app. Required.
- "alerts": [
- {
- "disabled": bool, # Optional. Is the
- alert disabled?.
- "operator": "UNSPECIFIED_OPERATOR",
- # Optional. Default value is "UNSPECIFIED_OPERATOR". Known
- values are: "UNSPECIFIED_OPERATOR", "GREATER_THAN", and
- "LESS_THAN".
- "rule": "UNSPECIFIED_RULE", #
- Optional. Default value is "UNSPECIFIED_RULE". Known values
- are: "UNSPECIFIED_RULE", "CPU_UTILIZATION",
- "MEM_UTILIZATION", "RESTART_COUNT", "DEPLOYMENT_FAILED",
- "DEPLOYMENT_LIVE", "DOMAIN_FAILED", "DOMAIN_LIVE",
- "AUTOSCALE_FAILED", "AUTOSCALE_SUCCEEDED",
- "FUNCTIONS_ACTIVATION_COUNT",
- "FUNCTIONS_AVERAGE_DURATION_MS",
- "FUNCTIONS_ERROR_RATE_PER_MINUTE",
- "FUNCTIONS_AVERAGE_WAIT_TIME_MS", "FUNCTIONS_ERROR_COUNT",
- and "FUNCTIONS_GB_RATE_PER_SECOND".
- "value": 0.0, # Optional. Threshold
- value for alert.
- "window": "UNSPECIFIED_WINDOW" #
- Optional. Default value is "UNSPECIFIED_WINDOW". Known values
- are: "UNSPECIFIED_WINDOW", "FIVE_MINUTES", "TEN_MINUTES",
- "THIRTY_MINUTES", and "ONE_HOUR".
- }
- ],
- "bitbucket": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "deploy_on_push": bool, # Optional. Whether
- to automatically deploy new commits made to the repo.
- "repo": "str" # Optional. The name of the
- repo in the format owner/repo. Example:
- ``digitalocean/sample-golang``.
- },
- "cors": {
- "allow_credentials": bool, # Optional.
- Whether browsers should expose the response to the client-side
- JavaScript code when the request"u2019s credentials mode is
- include. This configures the ``Access-Control-Allow-Credentials``
- header.
- "allow_headers": [
- "str" # Optional. The set of allowed
- HTTP request headers. This configures the
- ``Access-Control-Allow-Headers`` header.
- ],
- "allow_methods": [
- "str" # Optional. The set of allowed
- HTTP methods. This configures the
- ``Access-Control-Allow-Methods`` header.
- ],
- "allow_origins": [
- {
- "exact": "str", # Optional.
- Exact string match. Only 1 of ``exact``"" , ``prefix``""
- , or ``regex`` must be set.
- "prefix": "str", # Optional.
- Prefix-based match. Only 1 of ``exact``"" , ``prefix``""
- , or ``regex`` must be set.
- "regex": "str" # Optional.
- RE2 style regex-based match. Only 1 of ``exact``"" ,
- ``prefix``"" , or ``regex`` must be set. For more
- information about RE2 syntax, see:
- https://github.com/google/re2/wiki/Syntax.
- }
- ],
- "expose_headers": [
- "str" # Optional. The set of HTTP
- response headers that browsers are allowed to access. This
- configures the ``Access-Control-Expose-Headers`` header.
- ],
- "max_age": "str" # Optional. An optional
- duration specifying how long browsers can cache the results of a
- preflight request. This configures the ``Access-Control-Max-Age``
- header.
- },
- "envs": [
- {
- "key": "str", # The variable name.
- Required.
- "scope": "RUN_AND_BUILD_TIME", #
- Optional. Default value is "RUN_AND_BUILD_TIME". * RUN_TIME:
- Made available only at run-time * BUILD_TIME: Made available
- only at build-time * RUN_AND_BUILD_TIME: Made available at
- both build and run-time. Known values are: "UNSET",
- "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME".
- "type": "GENERAL", # Optional.
- Default value is "GENERAL". * GENERAL: A plain-text
- environment variable * SECRET: A secret encrypted environment
- variable. Known values are: "GENERAL" and "SECRET".
- "value": "str" # Optional. The
- value. If the type is ``SECRET``"" , the value will be
- encrypted on first submission. On following submissions, the
- encrypted value should be used.
+ "autoscaling": {
+ "components": {
+ "str": {
+ "from": 0, # Optional. The number of
+ replicas before scaling.
+ "to": 0, # Optional. The number of
+ replicas after scaling.
+ "triggering_metric": "str" #
+ Optional. The metric that triggered the scale change. Known
+ values are "cpu", "requests_per_second", "request_duration".
+ For inactivity sleep, "scale_from_zero" and "scale_to_zero"
+ are used.
}
- ],
- "git": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "repo_clone_url": "str" # Optional. The
- clone URL of the repo. Example:
- ``https://github.com/digitalocean/sample-golang.git``.
- },
- "github": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "deploy_on_push": bool, # Optional. Whether
- to automatically deploy new commits made to the repo.
- "repo": "str" # Optional. The name of the
- repo in the format owner/repo. Example:
- ``digitalocean/sample-golang``.
- },
- "gitlab": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "deploy_on_push": bool, # Optional. Whether
- to automatically deploy new commits made to the repo.
- "repo": "str" # Optional. The name of the
- repo in the format owner/repo. Example:
- ``digitalocean/sample-golang``.
},
- "log_destinations": [
+ "phase": "str" # Optional. The current phase of the
+ autoscaling event. Known values are: "UNKNOWN", "PENDING",
+ "IN_PROGRESS", "SUCCEEDED", "FAILED", and "CANCELED".
+ },
+ "created_at": "2020-02-20 00:00:00", # Optional. When the
+ event was created.
+ "deployment": {
+ "cause": "str", # Optional. What caused this
+ deployment to be created.
+ "cloned_from": "str", # Optional. The ID of a
+ previous deployment that this deployment was cloned from.
+ "created_at": "2020-02-20 00:00:00", # Optional. The
+ creation time of the deployment.
+ "functions": [
{
- "name": "str", # Required.
- "datadog": {
- "api_key": "str", # Datadog
- API key. Required.
- "endpoint": "str" #
- Optional. Datadog HTTP log intake endpoint.
- },
- "logtail": {
- "token": "str" # Optional.
- Logtail token.
- },
- "open_search": {
- "basic_auth": {
- "password": "str", #
- Optional. Password for user defined in User. Is
- required when ``endpoint`` is set. Cannot be set if
- using a DigitalOcean DBaaS OpenSearch cluster.
- "user": "str" #
- Optional. Username to authenticate with. Only
- required when ``endpoint`` is set. Defaults to
- ``doadmin`` when ``cluster_name`` is set.
- },
- "cluster_name": "str", #
- Optional. The name of a DigitalOcean DBaaS OpenSearch
- cluster to use as a log forwarding destination. Cannot be
- specified if ``endpoint`` is also specified.
- "endpoint": "str", #
- Optional. OpenSearch API Endpoint. Only HTTPS is
- supported. Format: https://:code:``::code:``.
- Cannot be specified if ``cluster_name`` is also
- specified.
- "index_name": "logs" #
- Optional. Default value is "logs". The index name to use
- for the logs. If not set, the default index name is
- "logs".
- },
- "papertrail": {
- "endpoint": "str" #
- Papertrail syslog endpoint. Required.
- }
+ "name": "str", # Optional. The name
+ of this functions component.
+ "namespace": "str", # Optional. The
+ namespace where the functions are deployed.
+ "source_commit_hash": "str" #
+ Optional. The commit hash of the repository that was used to
+ build this functions component.
}
],
- "routes": [
+ "id": "str", # Optional. The ID of the deployment.
+ "jobs": [
{
- "path": "str", # Optional.
- (Deprecated - Use Ingress Rules instead). An HTTP path
- prefix. Paths must start with / and must be unique across all
- components within an app.
- "preserve_path_prefix": bool #
- Optional. An optional flag to preserve the path that is
- forwarded to the backend service. By default, the HTTP
- request path will be trimmed from the left when forwarded to
- the component. For example, a component with ``path=/api``
- will have requests to ``/api/list`` trimmed to ``/list``. If
- this value is ``true``"" , the path will remain
- ``/api/list``.
+ "name": "str", # Optional. The name
+ of this job.
+ "source_commit_hash": "str" #
+ Optional. The commit hash of the repository that was used to
+ build this job.
}
],
- "source_dir": "str" # Optional. An optional path to
- the working directory to use for the build. For Dockerfile builds,
- this will be used as the build context. Must be relative to the root
- of the repo.
- }
- ],
- "ingress": {
- "rules": [
- {
- "component": {
- "name": "str", # The name of the
- component to route to. Required.
- "preserve_path_prefix": "str", #
- Optional. An optional flag to preserve the path that is
- forwarded to the backend service. By default, the HTTP
- request path will be trimmed from the left when forwarded to
- the component. For example, a component with ``path=/api``
- will have requests to ``/api/list`` trimmed to ``/list``. If
- this value is ``true``"" , the path will remain
- ``/api/list``. Note: this is not applicable for Functions
- Components and is mutually exclusive with ``rewrite``.
- "rewrite": "str" # Optional. An
- optional field that will rewrite the path of the component to
- be what is specified here. By default, the HTTP request path
- will be trimmed from the left when forwarded to the
- component. For example, a component with ``path=/api`` will
- have requests to ``/api/list`` trimmed to ``/list``. If you
- specified the rewrite to be ``/v1/``"" , requests to
- ``/api/list`` would be rewritten to ``/v1/list``. Note: this
- is mutually exclusive with ``preserve_path_prefix``.
- },
- "cors": {
- "allow_credentials": bool, #
- Optional. Whether browsers should expose the response to the
- client-side JavaScript code when the request"u2019s
- credentials mode is include. This configures the
- ``Access-Control-Allow-Credentials`` header.
- "allow_headers": [
- "str" # Optional. The set of
- allowed HTTP request headers. This configures the
- ``Access-Control-Allow-Headers`` header.
- ],
- "allow_methods": [
- "str" # Optional. The set of
- allowed HTTP methods. This configures the
- ``Access-Control-Allow-Methods`` header.
- ],
- "allow_origins": [
- {
- "exact": "str", #
- Optional. Exact string match. Only 1 of ``exact``"" ,
- ``prefix``"" , or ``regex`` must be set.
- "prefix": "str", #
- Optional. Prefix-based match. Only 1 of ``exact``"" ,
- ``prefix``"" , or ``regex`` must be set.
- "regex": "str" #
- Optional. RE2 style regex-based match. Only 1 of
- ``exact``"" , ``prefix``"" , or ``regex`` must be
- set. For more information about RE2 syntax, see:
- https://github.com/google/re2/wiki/Syntax.
- }
- ],
- "expose_headers": [
- "str" # Optional. The set of
- HTTP response headers that browsers are allowed to
- access. This configures the
- ``Access-Control-Expose-Headers`` header.
- ],
- "max_age": "str" # Optional. An
- optional duration specifying how long browsers can cache the
- results of a preflight request. This configures the
- ``Access-Control-Max-Age`` header.
- },
- "match": {
- "authority": {
- "exact": "str" # Required.
- },
- "path": {
- "prefix": "str" #
- Prefix-based match. For example, ``/api`` will match
- ``/api``"" , ``/api/``"" , and any nested paths such as
- ``/api/v1/endpoint``. Required.
+ "phase": "UNKNOWN", # Optional. Default value is
+ "UNKNOWN". Known values are: "UNKNOWN", "PENDING_BUILD", "BUILDING",
+ "PENDING_DEPLOY", "DEPLOYING", "ACTIVE", "SUPERSEDED", "ERROR", and
+ "CANCELED".
+ "phase_last_updated_at": "2020-02-20 00:00:00", #
+ Optional. When the deployment phase was last updated.
+ "progress": {
+ "error_steps": 0, # Optional. Number of
+ unsuccessful steps.
+ "pending_steps": 0, # Optional. Number of
+ pending steps.
+ "running_steps": 0, # Optional. Number of
+ currently running steps.
+ "steps": [
+ {
+ "component_name": "str", #
+ Optional. The component name that this step is associated
+ with.
+ "ended_at": "2020-02-20
+ 00:00:00", # Optional. The end time of this step.
+ "message_base": "str", #
+ Optional. The base of a human-readable description of the
+ step intended to be combined with the component name for
+ presentation. For example: ``message_base`` = "Building
+ service" ``component_name`` = "api".
+ "name": "str", # Optional.
+ The name of this step.
+ "reason": {
+ "code": "str", #
+ Optional. The error code.
+ "message": "str" #
+ Optional. The error message.
+ },
+ "started_at": "2020-02-20
+ 00:00:00", # Optional. The start time of this step.
+ "status": "UNKNOWN", #
+ Optional. Default value is "UNKNOWN". Known values are:
+ "UNKNOWN", "PENDING", "RUNNING", "ERROR", and "SUCCESS".
+ "steps": [
+ {} # Optional. Child
+ steps of this step.
+ ]
}
- },
- "redirect": {
- "authority": "str", # Optional. The
- authority/host to redirect to. This can be a hostname or IP
- address. Note: use ``port`` to set the port.
- "port": 0, # Optional. The port to
- redirect to.
- "redirect_code": 0, # Optional. The
- redirect code to use. Defaults to ``302``. Supported values
- are 300, 301, 302, 303, 304, 307, 308.
- "scheme": "str", # Optional. The
- scheme to redirect to. Supported values are ``http`` or
- ``https``. Default: ``https``.
- "uri": "str" # Optional. An optional
- URI path to redirect to. Note: if this is specified the whole
- URI of the original request will be overwritten to this
- value, irrespective of the original request URI being
- matched.
- }
- }
- ]
- },
- "jobs": [
- {
- "autoscaling": {
- "max_instance_count": 0, # Optional. The
- maximum amount of instances for this component. Must be more than
- min_instance_count.
- "metrics": {
- "cpu": {
- "percent": 80 # Optional.
- Default value is 80. The average target CPU utilization
- for the component.
+ ],
+ "success_steps": 0, # Optional. Number of
+ successful steps.
+ "summary_steps": [
+ {
+ "component_name": "str", #
+ Optional. The component name that this step is associated
+ with.
+ "ended_at": "2020-02-20
+ 00:00:00", # Optional. The end time of this step.
+ "message_base": "str", #
+ Optional. The base of a human-readable description of the
+ step intended to be combined with the component name for
+ presentation. For example: ``message_base`` = "Building
+ service" ``component_name`` = "api".
+ "name": "str", # Optional.
+ The name of this step.
+ "reason": {
+ "code": "str", #
+ Optional. The error code.
+ "message": "str" #
+ Optional. The error message.
+ },
+ "started_at": "2020-02-20
+ 00:00:00", # Optional. The start time of this step.
+ "status": "UNKNOWN", #
+ Optional. Default value is "UNKNOWN". Known values are:
+ "UNKNOWN", "PENDING", "RUNNING", "ERROR", and "SUCCESS".
+ "steps": [
+ {} # Optional. Child
+ steps of this step.
+ ]
}
- },
- "min_instance_count": 0 # Optional. The
- minimum amount of instances for this component. Must be less than
- max_instance_count.
- },
- "bitbucket": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "deploy_on_push": bool, # Optional. Whether
- to automatically deploy new commits made to the repo.
- "repo": "str" # Optional. The name of the
- repo in the format owner/repo. Example:
- ``digitalocean/sample-golang``.
+ ],
+ "total_steps": 0 # Optional. Total number of
+ steps.
},
- "build_command": "str", # Optional. An optional
- build command to run while building this component from source.
- "dockerfile_path": "str", # Optional. The path to
- the Dockerfile relative to the root of the repo. If set, it will be
- used to build this component. Otherwise, App Platform will attempt to
- build it using buildpacks.
- "environment_slug": "str", # Optional. An
- environment slug describing the type of this app. For a full list,
- please refer to `the product documentation
- `_.
- "envs": [
+ "services": [
{
- "key": "str", # The variable name.
- Required.
- "scope": "RUN_AND_BUILD_TIME", #
- Optional. Default value is "RUN_AND_BUILD_TIME". * RUN_TIME:
- Made available only at run-time * BUILD_TIME: Made available
- only at build-time * RUN_AND_BUILD_TIME: Made available at
- both build and run-time. Known values are: "UNSET",
- "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME".
- "type": "GENERAL", # Optional.
- Default value is "GENERAL". * GENERAL: A plain-text
- environment variable * SECRET: A secret encrypted environment
- variable. Known values are: "GENERAL" and "SECRET".
- "value": "str" # Optional. The
- value. If the type is ``SECRET``"" , the value will be
- encrypted on first submission. On following submissions, the
- encrypted value should be used.
+ "name": "str", # Optional. The name
+ of this service.
+ "source_commit_hash": "str" #
+ Optional. The commit hash of the repository that was used to
+ build this service.
}
],
- "git": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "repo_clone_url": "str" # Optional. The
- clone URL of the repo. Example:
- ``https://github.com/digitalocean/sample-golang.git``.
- },
- "github": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "deploy_on_push": bool, # Optional. Whether
- to automatically deploy new commits made to the repo.
- "repo": "str" # Optional. The name of the
- repo in the format owner/repo. Example:
- ``digitalocean/sample-golang``.
- },
- "gitlab": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "deploy_on_push": bool, # Optional. Whether
- to automatically deploy new commits made to the repo.
- "repo": "str" # Optional. The name of the
- repo in the format owner/repo. Example:
- ``digitalocean/sample-golang``.
- },
- "image": {
- "deploy_on_push": {
- "enabled": bool # Optional. Whether
- to automatically deploy new images. Can only be used for
- images hosted in DOCR and can only be used with an image tag,
- not a specific digest.
- },
- "digest": "str", # Optional. The image
- digest. Cannot be specified if tag is provided.
- "registry": "str", # Optional. The registry
- name. Must be left empty for the ``DOCR`` registry type.
- "registry_credentials": "str", # Optional.
- The credentials to be able to pull the image. The value will be
- encrypted on first submission. On following submissions, the
- encrypted value should be used. * "$username:$access_token" for
- registries of type ``DOCKER_HUB``. * "$username:$access_token"
- for registries of type ``GHCR``.
- "registry_type": "str", # Optional. *
- DOCKER_HUB: The DockerHub container registry type. * DOCR: The
- DigitalOcean container registry type. * GHCR: The Github
- container registry type. Known values are: "DOCKER_HUB", "DOCR",
- and "GHCR".
- "repository": "str", # Optional. The
- repository name.
- "tag": "latest" # Optional. Default value is
- "latest". The repository tag. Defaults to ``latest`` if not
- provided and no digest is provided. Cannot be specified if digest
- is provided.
- },
- "instance_count": 1, # Optional. Default value is 1.
- The amount of instances that this component should be scaled to.
- Default: 1. Must not be set if autoscaling is used.
- "instance_size_slug": {},
- "kind": "UNSPECIFIED", # Optional. Default value is
- "UNSPECIFIED". * UNSPECIFIED: Default job type, will auto-complete to
- POST_DEPLOY kind. * PRE_DEPLOY: Indicates a job that runs before an
- app deployment. * POST_DEPLOY: Indicates a job that runs after an app
- deployment. * FAILED_DEPLOY: Indicates a job that runs after a
- component fails to deploy. Known values are: "UNSPECIFIED",
- "PRE_DEPLOY", "POST_DEPLOY", and "FAILED_DEPLOY".
- "log_destinations": [
- {
- "name": "str", # Required.
- "datadog": {
- "api_key": "str", # Datadog
- API key. Required.
- "endpoint": "str" #
- Optional. Datadog HTTP log intake endpoint.
- },
- "logtail": {
- "token": "str" # Optional.
- Logtail token.
- },
- "open_search": {
- "basic_auth": {
- "password": "str", #
- Optional. Password for user defined in User. Is
- required when ``endpoint`` is set. Cannot be set if
- using a DigitalOcean DBaaS OpenSearch cluster.
- "user": "str" #
- Optional. Username to authenticate with. Only
- required when ``endpoint`` is set. Defaults to
- ``doadmin`` when ``cluster_name`` is set.
- },
+ "spec": {
+ "name": "str", # The name of the app. Must
+ be unique across all apps in the same account. Required.
+ "databases": [
+ {
+ "name": "str", # The
+ database's name. The name must be unique across all
+ components within the same app and cannot use capital
+ letters. Required.
"cluster_name": "str", #
- Optional. The name of a DigitalOcean DBaaS OpenSearch
- cluster to use as a log forwarding destination. Cannot be
- specified if ``endpoint`` is also specified.
- "endpoint": "str", #
- Optional. OpenSearch API Endpoint. Only HTTPS is
- supported. Format: https://:code:``::code:``.
- Cannot be specified if ``cluster_name`` is also
- specified.
- "index_name": "logs" #
- Optional. Default value is "logs". The index name to use
- for the logs. If not set, the default index name is
- "logs".
- },
- "papertrail": {
- "endpoint": "str" #
- Papertrail syslog endpoint. Required.
+ Optional. The name of the underlying DigitalOcean DBaaS
+ cluster. This is required for production databases. For
+ dev databases, if cluster_name is not set, a new cluster
+ will be provisioned.
+ "db_name": "str", #
+ Optional. The name of the MySQL or PostgreSQL database to
+ configure.
+ "db_user": "str", #
+ Optional. The name of the MySQL or PostgreSQL user to
+ configure.
+ "engine": "UNSET", #
+ Optional. Default value is "UNSET". * MYSQL: MySQL * PG:
+ PostgreSQL * REDIS: Caching * MONGODB: MongoDB * KAFKA:
+ Kafka * OPENSEARCH: OpenSearch * VALKEY: ValKey. Known
+ values are: "UNSET", "MYSQL", "PG", "REDIS", "MONGODB",
+ "KAFKA", "OPENSEARCH", and "VALKEY".
+ "production": bool, #
+ Optional. Whether this is a production or dev database.
+ "version": "str" # Optional.
+ The version of the database engine.
}
- }
- ],
- "name": "str", # Optional. The name. Must be unique
- across all components within the same app.
- "run_command": "str", # Optional. An optional run
- command to override the component's default.
- "source_dir": "str", # Optional. An optional path to
- the working directory to use for the build. For Dockerfile builds,
- this will be used as the build context. Must be relative to the root
- of the repo.
- "termination": {
- "grace_period_seconds": 0 # Optional. The
- number of seconds to wait between sending a TERM signal to a
- container and issuing a KILL which causes immediate shutdown.
- (Default 120).
- }
- }
- ],
- "maintenance": {
- "archive": bool, # Optional. Indicates whether the app
- should be archived. Setting this to true implies that enabled is set to
- true.
- "enabled": bool, # Optional. Indicates whether maintenance
- mode should be enabled for the app.
- "offline_page_url": "str" # Optional. A custom offline page
- to display when maintenance mode is enabled or the app is archived.
- },
- "region": "str", # Optional. The slug form of the geographical
- origin of the app. Default: ``nearest available``. Known values are: "atl",
- "nyc", "sfo", "tor", "ams", "fra", "lon", "blr", "sgp", and "syd".
- "services": [
- {
- "autoscaling": {
- "max_instance_count": 0, # Optional. The
- maximum amount of instances for this component. Must be more than
- min_instance_count.
- "metrics": {
- "cpu": {
- "percent": 80 # Optional.
- Default value is 80. The average target CPU utilization
- for the component.
- }
- },
- "min_instance_count": 0 # Optional. The
- minimum amount of instances for this component. Must be less than
- max_instance_count.
- },
- "bitbucket": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "deploy_on_push": bool, # Optional. Whether
- to automatically deploy new commits made to the repo.
- "repo": "str" # Optional. The name of the
- repo in the format owner/repo. Example:
- ``digitalocean/sample-golang``.
- },
- "build_command": "str", # Optional. An optional
- build command to run while building this component from source.
- "cors": {
- "allow_credentials": bool, # Optional.
- Whether browsers should expose the response to the client-side
- JavaScript code when the request"u2019s credentials mode is
- include. This configures the ``Access-Control-Allow-Credentials``
- header.
- "allow_headers": [
- "str" # Optional. The set of allowed
- HTTP request headers. This configures the
- ``Access-Control-Allow-Headers`` header.
- ],
- "allow_methods": [
- "str" # Optional. The set of allowed
- HTTP methods. This configures the
- ``Access-Control-Allow-Methods`` header.
],
- "allow_origins": [
+ "disable_edge_cache": False, # Optional.
+ Default value is False. .. role:: raw-html-m2r(raw) :format:
+ html If set to ``true``"" , the app will **not** be cached at
+ the edge (CDN). Enable this option if you want to manage CDN
+ configuration yourself"u2014whether by using an external CDN
+ provider or by handling static content and caching within your
+ app. This setting is also recommended for apps that require
+ real-time data or serve dynamic content, such as those using
+ Server-Sent Events (SSE) over GET, or hosting an MCP (Model
+ Context Protocol) Server that utilizes SSE.""
+ :raw-html-m2r:`
` **Note:** This feature is not available for
+ static site components."" :raw-html-m2r:`
` For more
+ information, see `Disable CDN Cache
+ `_.
+ "disable_email_obfuscation": False, #
+ Optional. Default value is False. If set to ``true``"" , email
+ addresses in the app will not be obfuscated. This is useful for
+ apps that require email addresses to be visible (in the HTML
+ markup).
+ "domains": [
{
- "exact": "str", # Optional.
- Exact string match. Only 1 of ``exact``"" , ``prefix``""
- , or ``regex`` must be set.
- "prefix": "str", # Optional.
- Prefix-based match. Only 1 of ``exact``"" , ``prefix``""
- , or ``regex`` must be set.
- "regex": "str" # Optional.
- RE2 style regex-based match. Only 1 of ``exact``"" ,
- ``prefix``"" , or ``regex`` must be set. For more
- information about RE2 syntax, see:
- https://github.com/google/re2/wiki/Syntax.
+ "domain": "str", # The
+ hostname for the domain. Required.
+ "minimum_tls_version": "str",
+ # Optional. The minimum version of TLS a client
+ application can use to access resources for the domain.
+ Must be one of the following values wrapped within
+ quotations: ``"1.2"`` or ``"1.3"``. Known values are:
+ "1.2" and "1.3".
+ "type": "UNSPECIFIED", #
+ Optional. Default value is "UNSPECIFIED". * DEFAULT: The
+ default ``.ondigitalocean.app`` domain assigned to this
+ app * PRIMARY: The primary domain for this app that is
+ displayed as the default in the control panel, used in
+ bindable environment variables, and any other places that
+ reference an app's live URL. Only one domain may be set
+ as primary. * ALIAS: A non-primary domain. Known values
+ are: "UNSPECIFIED", "DEFAULT", "PRIMARY", and "ALIAS".
+ "wildcard": bool, #
+ Optional. Indicates whether the domain includes all
+ sub-domains, in addition to the given domain.
+ "zone": "str" # Optional.
+ Optional. If the domain uses DigitalOcean DNS and you
+ would like App Platform to automatically manage it for
+ you, set this to the name of the domain on your account.
+ For example, If the domain you are adding is
+ ``app.domain.com``"" , the zone could be ``domain.com``.
}
],
- "expose_headers": [
- "str" # Optional. The set of HTTP
- response headers that browsers are allowed to access. This
- configures the ``Access-Control-Expose-Headers`` header.
- ],
- "max_age": "str" # Optional. An optional
- duration specifying how long browsers can cache the results of a
- preflight request. This configures the ``Access-Control-Max-Age``
- header.
- },
- "dockerfile_path": "str", # Optional. The path to
- the Dockerfile relative to the root of the repo. If set, it will be
- used to build this component. Otherwise, App Platform will attempt to
- build it using buildpacks.
- "environment_slug": "str", # Optional. An
- environment slug describing the type of this app. For a full list,
- please refer to `the product documentation
- `_.
- "envs": [
- {
- "key": "str", # The variable name.
- Required.
- "scope": "RUN_AND_BUILD_TIME", #
- Optional. Default value is "RUN_AND_BUILD_TIME". * RUN_TIME:
- Made available only at run-time * BUILD_TIME: Made available
- only at build-time * RUN_AND_BUILD_TIME: Made available at
- both build and run-time. Known values are: "UNSET",
- "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME".
- "type": "GENERAL", # Optional.
- Default value is "GENERAL". * GENERAL: A plain-text
- environment variable * SECRET: A secret encrypted environment
- variable. Known values are: "GENERAL" and "SECRET".
- "value": "str" # Optional. The
- value. If the type is ``SECRET``"" , the value will be
- encrypted on first submission. On following submissions, the
- encrypted value should be used.
- }
- ],
- "git": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "repo_clone_url": "str" # Optional. The
- clone URL of the repo. Example:
- ``https://github.com/digitalocean/sample-golang.git``.
- },
- "github": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "deploy_on_push": bool, # Optional. Whether
- to automatically deploy new commits made to the repo.
- "repo": "str" # Optional. The name of the
- repo in the format owner/repo. Example:
- ``digitalocean/sample-golang``.
- },
- "gitlab": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "deploy_on_push": bool, # Optional. Whether
- to automatically deploy new commits made to the repo.
- "repo": "str" # Optional. The name of the
- repo in the format owner/repo. Example:
- ``digitalocean/sample-golang``.
- },
- "health_check": {
- "failure_threshold": 0, # Optional. The
- number of failed health checks before considered unhealthy.
- "http_path": "str", # Optional. The route
- path used for the HTTP health check ping. If not set, the HTTP
- health check will be disabled and a TCP health check used
- instead.
- "initial_delay_seconds": 0, # Optional. The
- number of seconds to wait before beginning health checks.
- "period_seconds": 0, # Optional. The number
- of seconds to wait between health checks.
- "port": 0, # Optional. The port on which the
- health check will be performed. If not set, the health check will
- be performed on the component's http_port.
- "success_threshold": 0, # Optional. The
- number of successful health checks before considered healthy.
- "timeout_seconds": 0 # Optional. The number
- of seconds after which the check times out.
- },
- "http_port": 0, # Optional. The internal port on
- which this service's run command will listen. Default: 8080 If there
- is not an environment variable with the name ``PORT``"" , one will be
- automatically added with its value set to the value of this field.
- "image": {
- "deploy_on_push": {
- "enabled": bool # Optional. Whether
- to automatically deploy new images. Can only be used for
- images hosted in DOCR and can only be used with an image tag,
- not a specific digest.
+ "egress": {
+ "type": "AUTOASSIGN" # Optional.
+ Default value is "AUTOASSIGN". The app egress type. Known
+ values are: "AUTOASSIGN" and "DEDICATED_IP".
},
- "digest": "str", # Optional. The image
- digest. Cannot be specified if tag is provided.
- "registry": "str", # Optional. The registry
- name. Must be left empty for the ``DOCR`` registry type.
- "registry_credentials": "str", # Optional.
- The credentials to be able to pull the image. The value will be
- encrypted on first submission. On following submissions, the
- encrypted value should be used. * "$username:$access_token" for
- registries of type ``DOCKER_HUB``. * "$username:$access_token"
- for registries of type ``GHCR``.
- "registry_type": "str", # Optional. *
- DOCKER_HUB: The DockerHub container registry type. * DOCR: The
- DigitalOcean container registry type. * GHCR: The Github
- container registry type. Known values are: "DOCKER_HUB", "DOCR",
- and "GHCR".
- "repository": "str", # Optional. The
- repository name.
- "tag": "latest" # Optional. Default value is
- "latest". The repository tag. Defaults to ``latest`` if not
- provided and no digest is provided. Cannot be specified if digest
- is provided.
- },
- "instance_count": 1, # Optional. Default value is 1.
- The amount of instances that this component should be scaled to.
- Default: 1. Must not be set if autoscaling is used.
- "instance_size_slug": {},
- "internal_ports": [
- 0 # Optional. The ports on which this
- service will listen for internal traffic.
- ],
- "liveness_health_check": {
- "failure_threshold": 0, # Optional. The
- number of failed health checks before considered unhealthy.
- "http_path": "str", # Optional. The route
- path used for the HTTP health check ping. If not set, the HTTP
- health check will be disabled and a TCP health check used
- instead.
- "initial_delay_seconds": 0, # Optional. The
- number of seconds to wait before beginning health checks.
- "period_seconds": 0, # Optional. The number
- of seconds to wait between health checks.
- "port": 0, # Optional. The port on which the
- health check will be performed.
- "success_threshold": 0, # Optional. The
- number of successful health checks before considered healthy.
- "timeout_seconds": 0 # Optional. The number
- of seconds after which the check times out.
- },
- "log_destinations": [
- {
- "name": "str", # Required.
- "datadog": {
- "api_key": "str", # Datadog
- API key. Required.
- "endpoint": "str" #
- Optional. Datadog HTTP log intake endpoint.
- },
- "logtail": {
- "token": "str" # Optional.
- Logtail token.
- },
- "open_search": {
- "basic_auth": {
- "password": "str", #
- Optional. Password for user defined in User. Is
- required when ``endpoint`` is set. Cannot be set if
- using a DigitalOcean DBaaS OpenSearch cluster.
- "user": "str" #
- Optional. Username to authenticate with. Only
- required when ``endpoint`` is set. Defaults to
- ``doadmin`` when ``cluster_name`` is set.
+ "enhanced_threat_control_enabled": False, #
+ Optional. Default value is False. If set to ``true``"" ,
+ suspicious requests will go through additional security checks to
+ help mitigate layer 7 DDoS attacks.
+ "functions": [
+ {
+ "name": "str", # The name.
+ Must be unique across all components within the same app.
+ Required.
+ "alerts": [
+ {
+ "disabled":
+ bool, # Optional. Is the alert disabled?.
+ "operator":
+ "UNSPECIFIED_OPERATOR", # Optional. Default
+ value is "UNSPECIFIED_OPERATOR". Known values
+ are: "UNSPECIFIED_OPERATOR", "GREATER_THAN", and
+ "LESS_THAN".
+ "rule":
+ "UNSPECIFIED_RULE", # Optional. Default value is
+ "UNSPECIFIED_RULE". Known values are:
+ "UNSPECIFIED_RULE", "CPU_UTILIZATION",
+ "MEM_UTILIZATION", "RESTART_COUNT",
+ "DEPLOYMENT_FAILED", "DEPLOYMENT_LIVE",
+ "DOMAIN_FAILED", "DOMAIN_LIVE",
+ "AUTOSCALE_FAILED", "AUTOSCALE_SUCCEEDED",
+ "FUNCTIONS_ACTIVATION_COUNT",
+ "FUNCTIONS_AVERAGE_DURATION_MS",
+ "FUNCTIONS_ERROR_RATE_PER_MINUTE",
+ "FUNCTIONS_AVERAGE_WAIT_TIME_MS",
+ "FUNCTIONS_ERROR_COUNT", and
+ "FUNCTIONS_GB_RATE_PER_SECOND".
+ "value": 0.0,
+ # Optional. Threshold value for alert.
+ "window":
+ "UNSPECIFIED_WINDOW" # Optional. Default value
+ is "UNSPECIFIED_WINDOW". Known values are:
+ "UNSPECIFIED_WINDOW", "FIVE_MINUTES",
+ "TEN_MINUTES", "THIRTY_MINUTES", and "ONE_HOUR".
+ }
+ ],
+ "bitbucket": {
+ "branch": "str", #
+ Optional. The name of the branch to use.
+ "deploy_on_push":
+ bool, # Optional. Whether to automatically deploy
+ new commits made to the repo.
+ "repo": "str" #
+ Optional. The name of the repo in the format
+ owner/repo. Example: ``digitalocean/sample-golang``.
},
- "cluster_name": "str", #
- Optional. The name of a DigitalOcean DBaaS OpenSearch
- cluster to use as a log forwarding destination. Cannot be
- specified if ``endpoint`` is also specified.
- "endpoint": "str", #
- Optional. OpenSearch API Endpoint. Only HTTPS is
- supported. Format: https://:code:``::code:``.
- Cannot be specified if ``cluster_name`` is also
- specified.
- "index_name": "logs" #
- Optional. Default value is "logs". The index name to use
- for the logs. If not set, the default index name is
- "logs".
- },
- "papertrail": {
- "endpoint": "str" #
- Papertrail syslog endpoint. Required.
+ "cors": {
+ "allow_credentials":
+ bool, # Optional. Whether browsers should expose the
+ response to the client-side JavaScript code when the
+ request"u2019s credentials mode is include. This
+ configures the ``Access-Control-Allow-Credentials``
+ header.
+ "allow_headers": [
+ "str" #
+ Optional. The set of allowed HTTP request
+ headers. This configures the
+ ``Access-Control-Allow-Headers`` header.
+ ],
+ "allow_methods": [
+ "str" #
+ Optional. The set of allowed HTTP methods. This
+ configures the ``Access-Control-Allow-Methods``
+ header.
+ ],
+ "allow_origins": [
+ {
+ "exact": "str", # Optional. Exact string
+ match. Only 1 of ``exact``"" , ``prefix``"" ,
+ or ``regex`` must be set.
+ "prefix": "str", # Optional. Prefix-based
+ match. Only 1 of ``exact``"" , ``prefix``"" ,
+ or ``regex`` must be set.
+ "regex": "str" # Optional. RE2 style
+ regex-based match. Only 1 of ``exact``"" ,
+ ``prefix``"" , or ``regex`` must be set. For
+ more information about RE2 syntax, see:
+ https://github.com/google/re2/wiki/Syntax.
+ }
+ ],
+ "expose_headers": [
+ "str" #
+ Optional. The set of HTTP response headers that
+ browsers are allowed to access. This configures
+ the ``Access-Control-Expose-Headers`` header.
+ ],
+ "max_age": "str" #
+ Optional. An optional duration specifying how long
+ browsers can cache the results of a preflight
+ request. This configures the
+ ``Access-Control-Max-Age`` header.
+ },
+ "envs": [
+ {
+ "key": "str",
+ # The variable name. Required.
+ "scope":
+ "RUN_AND_BUILD_TIME", # Optional. Default value
+ is "RUN_AND_BUILD_TIME". * RUN_TIME: Made
+ available only at run-time * BUILD_TIME: Made
+ available only at build-time *
+ RUN_AND_BUILD_TIME: Made available at both build
+ and run-time. Known values are: "UNSET",
+ "RUN_TIME", "BUILD_TIME", and
+ "RUN_AND_BUILD_TIME".
+ "type":
+ "GENERAL", # Optional. Default value is
+ "GENERAL". * GENERAL: A plain-text environment
+ variable * SECRET: A secret encrypted environment
+ variable. Known values are: "GENERAL" and
+ "SECRET".
+ "value":
+ "str" # Optional. The value. If the type is
+ ``SECRET``"" , the value will be encrypted on
+ first submission. On following submissions, the
+ encrypted value should be used.
+ }
+ ],
+ "git": {
+ "branch": "str", #
+ Optional. The name of the branch to use.
+ "repo_clone_url":
+ "str" # Optional. The clone URL of the repo.
+ Example:
+ ``https://github.com/digitalocean/sample-golang.git``.
+ },
+ "github": {
+ "branch": "str", #
+ Optional. The name of the branch to use.
+ "deploy_on_push":
+ bool, # Optional. Whether to automatically deploy
+ new commits made to the repo.
+ "repo": "str" #
+ Optional. The name of the repo in the format
+ owner/repo. Example: ``digitalocean/sample-golang``.
+ },
+ "gitlab": {
+ "branch": "str", #
+ Optional. The name of the branch to use.
+ "deploy_on_push":
+ bool, # Optional. Whether to automatically deploy
+ new commits made to the repo.
+ "repo": "str" #
+ Optional. The name of the repo in the format
+ owner/repo. Example: ``digitalocean/sample-golang``.
+ },
+ "log_destinations": [
+ {
+ "name":
+ "str", # Required.
+ "datadog": {
+ "api_key": "str", # Datadog API key.
+ Required.
+ "endpoint": "str" # Optional. Datadog HTTP
+ log intake endpoint.
+ },
+ "logtail": {
+ "token": "str" # Optional. Logtail token.
+ },
+ "open_search": {
+ "basic_auth": {
+ "password": "str", # Optional. Password
+ for user defined in User. Is required
+ when ``endpoint`` is set. Cannot be set
+ if using a DigitalOcean DBaaS OpenSearch
+ cluster.
+ "user": "str" # Optional. Username to
+ authenticate with. Only required when
+ ``endpoint`` is set. Defaults to
+ ``doadmin`` when ``cluster_name`` is set.
+ },
+ "cluster_name": "str", # Optional. The name
+ of a DigitalOcean DBaaS OpenSearch cluster to
+ use as a log forwarding destination. Cannot
+ be specified if ``endpoint`` is also
+ specified.
+ "endpoint": "str", # Optional. OpenSearch
+ API Endpoint. Only HTTPS is supported.
+ Format:
+ https://:code:``::code:``. Cannot
+ be specified if ``cluster_name`` is also
+ specified.
+ "index_name": "logs" # Optional. Default
+ value is "logs". The index name to use for
+ the logs. If not set, the default index name
+ is "logs".
+ },
+ "papertrail":
+ {
+ "endpoint": "str" # Papertrail syslog
+ endpoint. Required.
+ }
+ }
+ ],
+ "routes": [
+ {
+ "path":
+ "str", # Optional. (Deprecated - Use Ingress
+ Rules instead). An HTTP path prefix. Paths must
+ start with / and must be unique across all
+ components within an app.
+ "preserve_path_prefix": bool # Optional. An
+ optional flag to preserve the path that is
+ forwarded to the backend service. By default, the
+ HTTP request path will be trimmed from the left
+ when forwarded to the component. For example, a
+ component with ``path=/api`` will have requests
+ to ``/api/list`` trimmed to ``/list``. If this
+ value is ``true``"" , the path will remain
+ ``/api/list``.
+ }
+ ],
+ "source_dir": "str" #
+ Optional. An optional path to the working directory to
+ use for the build. For Dockerfile builds, this will be
+ used as the build context. Must be relative to the root
+ of the repo.
}
- }
- ],
- "name": "str", # Optional. The name. Must be unique
- across all components within the same app.
- "protocol": "str", # Optional. The protocol which
- the service uses to serve traffic on the http_port. * ``HTTP``"" :
- The app is serving the HTTP protocol. Default. * ``HTTP2``"" : The
- app is serving the HTTP/2 protocol. Currently, this needs to be
- implemented in the service by serving HTTP/2 cleartext (h2c). Known
- values are: "HTTP" and "HTTP2".
- "routes": [
- {
- "path": "str", # Optional.
- (Deprecated - Use Ingress Rules instead). An HTTP path
- prefix. Paths must start with / and must be unique across all
- components within an app.
- "preserve_path_prefix": bool #
- Optional. An optional flag to preserve the path that is
- forwarded to the backend service. By default, the HTTP
- request path will be trimmed from the left when forwarded to
- the component. For example, a component with ``path=/api``
- will have requests to ``/api/list`` trimmed to ``/list``. If
- this value is ``true``"" , the path will remain
- ``/api/list``.
- }
- ],
- "run_command": "str", # Optional. An optional run
- command to override the component's default.
- "source_dir": "str", # Optional. An optional path to
- the working directory to use for the build. For Dockerfile builds,
- this will be used as the build context. Must be relative to the root
- of the repo.
- "termination": {
- "drain_seconds": 0, # Optional. The number
- of seconds to wait between selecting a container instance for
- termination and issuing the TERM signal. Selecting a container
- instance for termination begins an asynchronous drain of new
- requests on upstream load-balancers. (Default 15).
- "grace_period_seconds": 0 # Optional. The
- number of seconds to wait between sending a TERM signal to a
- container and issuing a KILL which causes immediate shutdown.
- (Default 120).
- }
- }
- ],
- "static_sites": [
- {
- "bitbucket": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "deploy_on_push": bool, # Optional. Whether
- to automatically deploy new commits made to the repo.
- "repo": "str" # Optional. The name of the
- repo in the format owner/repo. Example:
- ``digitalocean/sample-golang``.
- },
- "build_command": "str", # Optional. An optional
- build command to run while building this component from source.
- "catchall_document": "str", # Optional. The name of
- the document to use as the fallback for any requests to documents
- that are not found when serving this static site. Only 1 of
- ``catchall_document`` or ``error_document`` can be set.
- "cors": {
- "allow_credentials": bool, # Optional.
- Whether browsers should expose the response to the client-side
- JavaScript code when the request"u2019s credentials mode is
- include. This configures the ``Access-Control-Allow-Credentials``
- header.
- "allow_headers": [
- "str" # Optional. The set of allowed
- HTTP request headers. This configures the
- ``Access-Control-Allow-Headers`` header.
- ],
- "allow_methods": [
- "str" # Optional. The set of allowed
- HTTP methods. This configures the
- ``Access-Control-Allow-Methods`` header.
],
- "allow_origins": [
+ "ingress": {
+ "rules": [
+ {
+ "component": {
+ "name":
+ "str", # The name of the component to route to.
+ Required.
+ "preserve_path_prefix": "str", # Optional. An
+ optional flag to preserve the path that is
+ forwarded to the backend service. By default, the
+ HTTP request path will be trimmed from the left
+ when forwarded to the component. For example, a
+ component with ``path=/api`` will have requests
+ to ``/api/list`` trimmed to ``/list``. If this
+ value is ``true``"" , the path will remain
+ ``/api/list``. Note: this is not applicable for
+ Functions Components and is mutually exclusive
+ with ``rewrite``.
+ "rewrite":
+ "str" # Optional. An optional field that will
+ rewrite the path of the component to be what is
+ specified here. By default, the HTTP request path
+ will be trimmed from the left when forwarded to
+ the component. For example, a component with
+ ``path=/api`` will have requests to ``/api/list``
+ trimmed to ``/list``. If you specified the
+ rewrite to be ``/v1/``"" , requests to
+ ``/api/list`` would be rewritten to ``/v1/list``.
+ Note: this is mutually exclusive with
+ ``preserve_path_prefix``.
+ },
+ "cors": {
+ "allow_credentials": bool, # Optional. Whether
+ browsers should expose the response to the
+ client-side JavaScript code when the
+ request"u2019s credentials mode is include. This
+ configures the
+ ``Access-Control-Allow-Credentials`` header.
+ "allow_headers": [
+ "str"
+ # Optional. The set of allowed HTTP request
+ headers. This configures the
+ ``Access-Control-Allow-Headers`` header.
+ ],
+ "allow_methods": [
+ "str"
+ # Optional. The set of allowed HTTP methods.
+ This configures the
+ ``Access-Control-Allow-Methods`` header.
+ ],
+ "allow_origins": [
+ {
+ "exact": "str", # Optional. Exact string
+ match. Only 1 of ``exact``"" ,
+ ``prefix``"" , or ``regex`` must be set.
+ "prefix": "str", # Optional.
+ Prefix-based match. Only 1 of ``exact``""
+ , ``prefix``"" , or ``regex`` must be
+ set.
+ "regex": "str" # Optional. RE2 style
+ regex-based match. Only 1 of ``exact``""
+ , ``prefix``"" , or ``regex`` must be
+ set. For more information about RE2
+ syntax, see:
+ https://github.com/google/re2/wiki/Syntax.
+ }
+ ],
+ "expose_headers": [
+ "str"
+ # Optional. The set of HTTP response headers
+ that browsers are allowed to access. This
+ configures the
+ ``Access-Control-Expose-Headers`` header.
+ ],
+ "max_age":
+ "str" # Optional. An optional duration
+ specifying how long browsers can cache the
+ results of a preflight request. This configures
+ the ``Access-Control-Max-Age`` header.
+ },
+ "match": {
+ "authority":
+ {
+ "exact": "str" # Required.
+ },
+ "path": {
+ "prefix": "str" # Prefix-based match. For
+ example, ``/api`` will match ``/api``"" ,
+ ``/api/``"" , and any nested paths such as
+ ``/api/v1/endpoint``. Required.
+ }
+ },
+ "redirect": {
+ "authority":
+ "str", # Optional. The authority/host to
+ redirect to. This can be a hostname or IP
+ address. Note: use ``port`` to set the port.
+ "port": 0, #
+ Optional. The port to redirect to.
+ "redirect_code": 0, # Optional. The redirect
+ code to use. Defaults to ``302``. Supported
+ values are 300, 301, 302, 303, 304, 307, 308.
+ "scheme":
+ "str", # Optional. The scheme to redirect to.
+ Supported values are ``http`` or ``https``.
+ Default: ``https``.
+ "uri": "str"
+ # Optional. An optional URI path to redirect to.
+ Note: if this is specified the whole URI of the
+ original request will be overwritten to this
+ value, irrespective of the original request URI
+ being matched.
+ }
+ }
+ ]
+ },
+ "jobs": [
{
- "exact": "str", # Optional.
- Exact string match. Only 1 of ``exact``"" , ``prefix``""
- , or ``regex`` must be set.
- "prefix": "str", # Optional.
- Prefix-based match. Only 1 of ``exact``"" , ``prefix``""
- , or ``regex`` must be set.
- "regex": "str" # Optional.
- RE2 style regex-based match. Only 1 of ``exact``"" ,
- ``prefix``"" , or ``regex`` must be set. For more
- information about RE2 syntax, see:
- https://github.com/google/re2/wiki/Syntax.
+ "autoscaling": {
+ "max_instance_count":
+ 0, # Optional. The maximum amount of instances for
+ this component. Must be more than min_instance_count.
+ "metrics": {
+ "cpu": {
+ "percent": 80 # Optional. Default value is
+ 80. The average target CPU utilization for
+ the component.
+ }
+ },
+ "min_instance_count":
+ 0 # Optional. The minimum amount of instances for
+ this component. Must be less than max_instance_count.
+ },
+ "bitbucket": {
+ "branch": "str", #
+ Optional. The name of the branch to use.
+ "deploy_on_push":
+ bool, # Optional. Whether to automatically deploy
+ new commits made to the repo.
+ "repo": "str" #
+ Optional. The name of the repo in the format
+ owner/repo. Example: ``digitalocean/sample-golang``.
+ },
+ "build_command": "str", #
+ Optional. An optional build command to run while building
+ this component from source.
+ "dockerfile_path": "str", #
+ Optional. The path to the Dockerfile relative to the root
+ of the repo. If set, it will be used to build this
+ component. Otherwise, App Platform will attempt to build
+ it using buildpacks.
+ "environment_slug": "str", #
+ Optional. An environment slug describing the type of this
+ app. For a full list, please refer to `the product
+ documentation
+ `_.
+ "envs": [
+ {
+ "key": "str",
+ # The variable name. Required.
+ "scope":
+ "RUN_AND_BUILD_TIME", # Optional. Default value
+ is "RUN_AND_BUILD_TIME". * RUN_TIME: Made
+ available only at run-time * BUILD_TIME: Made
+ available only at build-time *
+ RUN_AND_BUILD_TIME: Made available at both build
+ and run-time. Known values are: "UNSET",
+ "RUN_TIME", "BUILD_TIME", and
+ "RUN_AND_BUILD_TIME".
+ "type":
+ "GENERAL", # Optional. Default value is
+ "GENERAL". * GENERAL: A plain-text environment
+ variable * SECRET: A secret encrypted environment
+ variable. Known values are: "GENERAL" and
+ "SECRET".
+ "value":
+ "str" # Optional. The value. If the type is
+ ``SECRET``"" , the value will be encrypted on
+ first submission. On following submissions, the
+ encrypted value should be used.
+ }
+ ],
+ "git": {
+ "branch": "str", #
+ Optional. The name of the branch to use.
+ "repo_clone_url":
+ "str" # Optional. The clone URL of the repo.
+ Example:
+ ``https://github.com/digitalocean/sample-golang.git``.
+ },
+ "github": {
+ "branch": "str", #
+ Optional. The name of the branch to use.
+ "deploy_on_push":
+ bool, # Optional. Whether to automatically deploy
+ new commits made to the repo.
+ "repo": "str" #
+ Optional. The name of the repo in the format
+ owner/repo. Example: ``digitalocean/sample-golang``.
+ },
+ "gitlab": {
+ "branch": "str", #
+ Optional. The name of the branch to use.
+ "deploy_on_push":
+ bool, # Optional. Whether to automatically deploy
+ new commits made to the repo.
+ "repo": "str" #
+ Optional. The name of the repo in the format
+ owner/repo. Example: ``digitalocean/sample-golang``.
+ },
+ "image": {
+ "deploy_on_push": {
+ "enabled":
+ bool # Optional. Whether to automatically deploy
+ new images. Can only be used for images hosted in
+ DOCR and can only be used with an image tag, not
+ a specific digest.
+ },
+ "digest": "str", #
+ Optional. The image digest. Cannot be specified if
+ tag is provided.
+ "registry": "str", #
+ Optional. The registry name. Must be left empty for
+ the ``DOCR`` registry type.
+ "registry_credentials": "str", # Optional. The
+ credentials to be able to pull the image. The value
+ will be encrypted on first submission. On following
+ submissions, the encrypted value should be used. *
+ "$username:$access_token" for registries of type
+ ``DOCKER_HUB``. * "$username:$access_token" for
+ registries of type ``GHCR``.
+ "registry_type":
+ "str", # Optional. * DOCKER_HUB: The DockerHub
+ container registry type. * DOCR: The DigitalOcean
+ container registry type. * GHCR: The Github container
+ registry type. Known values are: "DOCKER_HUB",
+ "DOCR", and "GHCR".
+ "repository": "str",
+ # Optional. The repository name.
+ "tag": "latest" #
+ Optional. Default value is "latest". The repository
+ tag. Defaults to ``latest`` if not provided and no
+ digest is provided. Cannot be specified if digest is
+ provided.
+ },
+ "instance_count": 1, #
+ Optional. Default value is 1. The amount of instances
+ that this component should be scaled to. Default: 1. Must
+ not be set if autoscaling is used.
+ "instance_size_slug": {},
+ "kind": "UNSPECIFIED", #
+ Optional. Default value is "UNSPECIFIED". * UNSPECIFIED:
+ Default job type, will auto-complete to POST_DEPLOY kind.
+ * PRE_DEPLOY: Indicates a job that runs before an app
+ deployment. * POST_DEPLOY: Indicates a job that runs
+ after an app deployment. * FAILED_DEPLOY: Indicates a job
+ that runs after a component fails to deploy. Known values
+ are: "UNSPECIFIED", "PRE_DEPLOY", "POST_DEPLOY", and
+ "FAILED_DEPLOY".
+ "log_destinations": [
+ {
+ "name":
+ "str", # Required.
+ "datadog": {
+ "api_key": "str", # Datadog API key.
+ Required.
+ "endpoint": "str" # Optional. Datadog HTTP
+ log intake endpoint.
+ },
+ "logtail": {
+ "token": "str" # Optional. Logtail token.
+ },
+ "open_search": {
+ "basic_auth": {
+ "password": "str", # Optional. Password
+ for user defined in User. Is required
+ when ``endpoint`` is set. Cannot be set
+ if using a DigitalOcean DBaaS OpenSearch
+ cluster.
+ "user": "str" # Optional. Username to
+ authenticate with. Only required when
+ ``endpoint`` is set. Defaults to
+ ``doadmin`` when ``cluster_name`` is set.
+ },
+ "cluster_name": "str", # Optional. The name
+ of a DigitalOcean DBaaS OpenSearch cluster to
+ use as a log forwarding destination. Cannot
+ be specified if ``endpoint`` is also
+ specified.
+ "endpoint": "str", # Optional. OpenSearch
+ API Endpoint. Only HTTPS is supported.
+ Format:
+ https://:code:``::code:``. Cannot
+ be specified if ``cluster_name`` is also
+ specified.
+ "index_name": "logs" # Optional. Default
+ value is "logs". The index name to use for
+ the logs. If not set, the default index name
+ is "logs".
+ },
+ "papertrail":
+ {
+ "endpoint": "str" # Papertrail syslog
+ endpoint. Required.
+ }
+ }
+ ],
+ "name": "str", # Optional.
+ The name. Must be unique across all components within the
+ same app.
+ "run_command": "str", #
+ Optional. An optional run command to override the
+ component's default.
+ "source_dir": "str", #
+ Optional. An optional path to the working directory to
+ use for the build. For Dockerfile builds, this will be
+ used as the build context. Must be relative to the root
+ of the repo.
+ "termination": {
+ "grace_period_seconds": 0 # Optional. The number of
+ seconds to wait between sending a TERM signal to a
+ container and issuing a KILL which causes immediate
+ shutdown. (Default 120).
+ }
}
],
- "expose_headers": [
- "str" # Optional. The set of HTTP
- response headers that browsers are allowed to access. This
- configures the ``Access-Control-Expose-Headers`` header.
- ],
- "max_age": "str" # Optional. An optional
- duration specifying how long browsers can cache the results of a
- preflight request. This configures the ``Access-Control-Max-Age``
- header.
- },
- "dockerfile_path": "str", # Optional. The path to
- the Dockerfile relative to the root of the repo. If set, it will be
- used to build this component. Otherwise, App Platform will attempt to
- build it using buildpacks.
- "environment_slug": "str", # Optional. An
- environment slug describing the type of this app. For a full list,
- please refer to `the product documentation
- `_.
- "envs": [
- {
- "key": "str", # The variable name.
- Required.
- "scope": "RUN_AND_BUILD_TIME", #
- Optional. Default value is "RUN_AND_BUILD_TIME". * RUN_TIME:
- Made available only at run-time * BUILD_TIME: Made available
- only at build-time * RUN_AND_BUILD_TIME: Made available at
- both build and run-time. Known values are: "UNSET",
- "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME".
- "type": "GENERAL", # Optional.
- Default value is "GENERAL". * GENERAL: A plain-text
- environment variable * SECRET: A secret encrypted environment
- variable. Known values are: "GENERAL" and "SECRET".
- "value": "str" # Optional. The
- value. If the type is ``SECRET``"" , the value will be
- encrypted on first submission. On following submissions, the
- encrypted value should be used.
- }
- ],
- "error_document": "404.html", # Optional. Default
- value is "404.html". The name of the error document to use when
- serving this static site. Default: 404.html. If no such file exists
- within the built assets, App Platform will supply one.
- "git": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "repo_clone_url": "str" # Optional. The
- clone URL of the repo. Example:
- ``https://github.com/digitalocean/sample-golang.git``.
- },
- "github": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "deploy_on_push": bool, # Optional. Whether
- to automatically deploy new commits made to the repo.
- "repo": "str" # Optional. The name of the
- repo in the format owner/repo. Example:
- ``digitalocean/sample-golang``.
- },
- "gitlab": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "deploy_on_push": bool, # Optional. Whether
- to automatically deploy new commits made to the repo.
- "repo": "str" # Optional. The name of the
- repo in the format owner/repo. Example:
- ``digitalocean/sample-golang``.
- },
- "image": {
- "deploy_on_push": {
- "enabled": bool # Optional. Whether
- to automatically deploy new images. Can only be used for
- images hosted in DOCR and can only be used with an image tag,
- not a specific digest.
- },
- "digest": "str", # Optional. The image
- digest. Cannot be specified if tag is provided.
- "registry": "str", # Optional. The registry
- name. Must be left empty for the ``DOCR`` registry type.
- "registry_credentials": "str", # Optional.
- The credentials to be able to pull the image. The value will be
- encrypted on first submission. On following submissions, the
- encrypted value should be used. * "$username:$access_token" for
- registries of type ``DOCKER_HUB``. * "$username:$access_token"
- for registries of type ``GHCR``.
- "registry_type": "str", # Optional. *
- DOCKER_HUB: The DockerHub container registry type. * DOCR: The
- DigitalOcean container registry type. * GHCR: The Github
- container registry type. Known values are: "DOCKER_HUB", "DOCR",
- and "GHCR".
- "repository": "str", # Optional. The
- repository name.
- "tag": "latest" # Optional. Default value is
- "latest". The repository tag. Defaults to ``latest`` if not
- provided and no digest is provided. Cannot be specified if digest
- is provided.
- },
- "index_document": "index.html", # Optional. Default
- value is "index.html". The name of the index document to use when
- serving this static site. Default: index.html.
- "log_destinations": [
- {
- "name": "str", # Required.
- "datadog": {
- "api_key": "str", # Datadog
- API key. Required.
- "endpoint": "str" #
- Optional. Datadog HTTP log intake endpoint.
- },
- "logtail": {
- "token": "str" # Optional.
- Logtail token.
- },
- "open_search": {
- "basic_auth": {
- "password": "str", #
- Optional. Password for user defined in User. Is
- required when ``endpoint`` is set. Cannot be set if
- using a DigitalOcean DBaaS OpenSearch cluster.
- "user": "str" #
- Optional. Username to authenticate with. Only
- required when ``endpoint`` is set. Defaults to
- ``doadmin`` when ``cluster_name`` is set.
+ "maintenance": {
+ "archive": bool, # Optional.
+ Indicates whether the app should be archived. Setting this to
+ true implies that enabled is set to true.
+ "enabled": bool, # Optional.
+ Indicates whether maintenance mode should be enabled for the
+ app.
+ "offline_page_url": "str" #
+ Optional. A custom offline page to display when maintenance
+ mode is enabled or the app is archived.
+ },
+ "region": "str", # Optional. The slug form
+ of the geographical origin of the app. Default: ``nearest
+ available``. Known values are: "atl", "nyc", "sfo", "tor", "ams",
+ "fra", "lon", "blr", "sgp", and "syd".
+ "services": [
+ {
+ "autoscaling": {
+ "max_instance_count":
+ 0, # Optional. The maximum amount of instances for
+ this component. Must be more than min_instance_count.
+ "metrics": {
+ "cpu": {
+ "percent": 80 # Optional. Default value is
+ 80. The average target CPU utilization for
+ the component.
+ }
+ },
+ "min_instance_count":
+ 0 # Optional. The minimum amount of instances for
+ this component. Must be less than max_instance_count.
},
- "cluster_name": "str", #
- Optional. The name of a DigitalOcean DBaaS OpenSearch
- cluster to use as a log forwarding destination. Cannot be
- specified if ``endpoint`` is also specified.
- "endpoint": "str", #
- Optional. OpenSearch API Endpoint. Only HTTPS is
- supported. Format: https://:code:``::code:``.
- Cannot be specified if ``cluster_name`` is also
- specified.
- "index_name": "logs" #
- Optional. Default value is "logs". The index name to use
- for the logs. If not set, the default index name is
- "logs".
- },
- "papertrail": {
- "endpoint": "str" #
- Papertrail syslog endpoint. Required.
+ "bitbucket": {
+ "branch": "str", #
+ Optional. The name of the branch to use.
+ "deploy_on_push":
+ bool, # Optional. Whether to automatically deploy
+ new commits made to the repo.
+ "repo": "str" #
+ Optional. The name of the repo in the format
+ owner/repo. Example: ``digitalocean/sample-golang``.
+ },
+ "build_command": "str", #
+ Optional. An optional build command to run while building
+ this component from source.
+ "cors": {
+ "allow_credentials":
+ bool, # Optional. Whether browsers should expose the
+ response to the client-side JavaScript code when the
+                      request's credentials mode is include. This
+ configures the ``Access-Control-Allow-Credentials``
+ header.
+ "allow_headers": [
+ "str" #
+ Optional. The set of allowed HTTP request
+ headers. This configures the
+ ``Access-Control-Allow-Headers`` header.
+ ],
+ "allow_methods": [
+ "str" #
+ Optional. The set of allowed HTTP methods. This
+ configures the ``Access-Control-Allow-Methods``
+ header.
+ ],
+ "allow_origins": [
+ {
+                                  "exact": "str", # Optional. Exact string
+                                  match. Only 1 of ``exact``, ``prefix``,
+                                  or ``regex`` must be set.
+                                  "prefix": "str", # Optional. Prefix-based
+                                  match. Only 1 of ``exact``, ``prefix``,
+                                  or ``regex`` must be set.
+                                  "regex": "str" # Optional. RE2 style
+                                  regex-based match. Only 1 of ``exact``,
+                                  ``prefix``, or ``regex`` must be set. For
+                                  more information about RE2 syntax, see:
+                                  https://github.com/google/re2/wiki/Syntax.
+ }
+ ],
+ "expose_headers": [
+ "str" #
+ Optional. The set of HTTP response headers that
+ browsers are allowed to access. This configures
+ the ``Access-Control-Expose-Headers`` header.
+ ],
+ "max_age": "str" #
+ Optional. An optional duration specifying how long
+ browsers can cache the results of a preflight
+ request. This configures the
+ ``Access-Control-Max-Age`` header.
+ },
+ "dockerfile_path": "str", #
+ Optional. The path to the Dockerfile relative to the root
+ of the repo. If set, it will be used to build this
+ component. Otherwise, App Platform will attempt to build
+ it using buildpacks.
+ "environment_slug": "str", #
+ Optional. An environment slug describing the type of this
+ app. For a full list, please refer to `the product
+ documentation
+ `_.
+ "envs": [
+ {
+ "key": "str",
+ # The variable name. Required.
+ "scope":
+ "RUN_AND_BUILD_TIME", # Optional. Default value
+ is "RUN_AND_BUILD_TIME". * RUN_TIME: Made
+ available only at run-time * BUILD_TIME: Made
+ available only at build-time *
+ RUN_AND_BUILD_TIME: Made available at both build
+ and run-time. Known values are: "UNSET",
+ "RUN_TIME", "BUILD_TIME", and
+ "RUN_AND_BUILD_TIME".
+ "type":
+ "GENERAL", # Optional. Default value is
+ "GENERAL". * GENERAL: A plain-text environment
+ variable * SECRET: A secret encrypted environment
+ variable. Known values are: "GENERAL" and
+ "SECRET".
+ "value":
+ "str" # Optional. The value. If the type is
+                              ``SECRET``, the value will be encrypted on
+ first submission. On following submissions, the
+ encrypted value should be used.
+ }
+ ],
+ "git": {
+ "branch": "str", #
+ Optional. The name of the branch to use.
+ "repo_clone_url":
+ "str" # Optional. The clone URL of the repo.
+ Example:
+ ``https://github.com/digitalocean/sample-golang.git``.
+ },
+ "github": {
+ "branch": "str", #
+ Optional. The name of the branch to use.
+ "deploy_on_push":
+ bool, # Optional. Whether to automatically deploy
+ new commits made to the repo.
+ "repo": "str" #
+ Optional. The name of the repo in the format
+ owner/repo. Example: ``digitalocean/sample-golang``.
+ },
+ "gitlab": {
+ "branch": "str", #
+ Optional. The name of the branch to use.
+ "deploy_on_push":
+ bool, # Optional. Whether to automatically deploy
+ new commits made to the repo.
+ "repo": "str" #
+ Optional. The name of the repo in the format
+ owner/repo. Example: ``digitalocean/sample-golang``.
+ },
+ "health_check": {
+ "failure_threshold":
+ 0, # Optional. The number of failed health checks
+ before considered unhealthy.
+ "http_path": "str",
+ # Optional. The route path used for the HTTP health
+ check ping. If not set, the HTTP health check will be
+ disabled and a TCP health check used instead.
+ "initial_delay_seconds": 0, # Optional. The number
+ of seconds to wait before beginning health checks.
+ "period_seconds": 0,
+ # Optional. The number of seconds to wait between
+ health checks.
+ "port": 0, #
+ Optional. The port on which the health check will be
+ performed. If not set, the health check will be
+ performed on the component's http_port.
+ "success_threshold":
+ 0, # Optional. The number of successful health
+ checks before considered healthy.
+ "timeout_seconds": 0
+ # Optional. The number of seconds after which the
+ check times out.
+ },
+ "http_port": 0, # Optional.
+ The internal port on which this service's run command
+ will listen. Default: 8080 If there is not an environment
+                      variable with the name ``PORT``, one will be
+ automatically added with its value set to the value of
+ this field.
+ "image": {
+ "deploy_on_push": {
+ "enabled":
+ bool # Optional. Whether to automatically deploy
+ new images. Can only be used for images hosted in
+ DOCR and can only be used with an image tag, not
+ a specific digest.
+ },
+ "digest": "str", #
+ Optional. The image digest. Cannot be specified if
+ tag is provided.
+ "registry": "str", #
+ Optional. The registry name. Must be left empty for
+ the ``DOCR`` registry type.
+ "registry_credentials": "str", # Optional. The
+ credentials to be able to pull the image. The value
+ will be encrypted on first submission. On following
+ submissions, the encrypted value should be used. *
+ "$username:$access_token" for registries of type
+ ``DOCKER_HUB``. * "$username:$access_token" for
+ registries of type ``GHCR``.
+ "registry_type":
+ "str", # Optional. * DOCKER_HUB: The DockerHub
+ container registry type. * DOCR: The DigitalOcean
+ container registry type. * GHCR: The Github container
+ registry type. Known values are: "DOCKER_HUB",
+ "DOCR", and "GHCR".
+ "repository": "str",
+ # Optional. The repository name.
+ "tag": "latest" #
+ Optional. Default value is "latest". The repository
+ tag. Defaults to ``latest`` if not provided and no
+ digest is provided. Cannot be specified if digest is
+ provided.
+ },
+ "instance_count": 1, #
+ Optional. Default value is 1. The amount of instances
+ that this component should be scaled to. Default: 1. Must
+ not be set if autoscaling is used.
+ "instance_size_slug": {},
+ "internal_ports": [
+ 0 # Optional. The
+ ports on which this service will listen for internal
+ traffic.
+ ],
+ "liveness_health_check": {
+ "failure_threshold":
+ 0, # Optional. The number of failed health checks
+ before considered unhealthy.
+ "http_path": "str",
+ # Optional. The route path used for the HTTP health
+ check ping. If not set, the HTTP health check will be
+ disabled and a TCP health check used instead.
+ "initial_delay_seconds": 0, # Optional. The number
+ of seconds to wait before beginning health checks.
+ "period_seconds": 0,
+ # Optional. The number of seconds to wait between
+ health checks.
+ "port": 0, #
+ Optional. The port on which the health check will be
+ performed.
+ "success_threshold":
+ 0, # Optional. The number of successful health
+ checks before considered healthy.
+ "timeout_seconds": 0
+ # Optional. The number of seconds after which the
+ check times out.
+ },
+ "log_destinations": [
+ {
+ "name":
+ "str", # Required.
+ "datadog": {
+ "api_key": "str", # Datadog API key.
+ Required.
+ "endpoint": "str" # Optional. Datadog HTTP
+ log intake endpoint.
+ },
+ "logtail": {
+ "token": "str" # Optional. Logtail token.
+ },
+ "open_search": {
+ "basic_auth": {
+ "password": "str", # Optional. Password
+ for user defined in User. Is required
+ when ``endpoint`` is set. Cannot be set
+ if using a DigitalOcean DBaaS OpenSearch
+ cluster.
+ "user": "str" # Optional. Username to
+ authenticate with. Only required when
+ ``endpoint`` is set. Defaults to
+ ``doadmin`` when ``cluster_name`` is set.
+ },
+ "cluster_name": "str", # Optional. The name
+ of a DigitalOcean DBaaS OpenSearch cluster to
+ use as a log forwarding destination. Cannot
+ be specified if ``endpoint`` is also
+ specified.
+ "endpoint": "str", # Optional. OpenSearch
+ API Endpoint. Only HTTPS is supported.
+ Format:
+                          https://<host>:<port>. Cannot
+ be specified if ``cluster_name`` is also
+ specified.
+ "index_name": "logs" # Optional. Default
+ value is "logs". The index name to use for
+ the logs. If not set, the default index name
+ is "logs".
+ },
+ "papertrail":
+ {
+ "endpoint": "str" # Papertrail syslog
+ endpoint. Required.
+ }
+ }
+ ],
+ "name": "str", # Optional.
+ The name. Must be unique across all components within the
+ same app.
+ "protocol": "str", #
+ Optional. The protocol which the service uses to serve
+                      traffic on the http_port. * ``HTTP``: The app is
+                      serving the HTTP protocol. Default. * ``HTTP2``: The
+ app is serving the HTTP/2 protocol. Currently, this needs
+ to be implemented in the service by serving HTTP/2
+ cleartext (h2c). Known values are: "HTTP" and "HTTP2".
+ "routes": [
+ {
+ "path":
+ "str", # Optional. (Deprecated - Use Ingress
+ Rules instead). An HTTP path prefix. Paths must
+ start with / and must be unique across all
+ components within an app.
+ "preserve_path_prefix": bool # Optional. An
+ optional flag to preserve the path that is
+ forwarded to the backend service. By default, the
+ HTTP request path will be trimmed from the left
+ when forwarded to the component. For example, a
+ component with ``path=/api`` will have requests
+ to ``/api/list`` trimmed to ``/list``. If this
+                              value is ``true``, the path will remain
+ ``/api/list``.
+ }
+ ],
+ "run_command": "str", #
+ Optional. An optional run command to override the
+ component's default.
+ "source_dir": "str", #
+ Optional. An optional path to the working directory to
+ use for the build. For Dockerfile builds, this will be
+ used as the build context. Must be relative to the root
+ of the repo.
+ "termination": {
+ "drain_seconds": 0,
+ # Optional. The number of seconds to wait between
+ selecting a container instance for termination and
+ issuing the TERM signal. Selecting a container
+ instance for termination begins an asynchronous drain
+ of new requests on upstream load-balancers. (Default
+ 15).
+ "grace_period_seconds": 0 # Optional. The number of
+ seconds to wait between sending a TERM signal to a
+ container and issuing a KILL which causes immediate
+ shutdown. (Default 120).
+ }
}
- }
- ],
- "name": "str", # Optional. The name. Must be unique
- across all components within the same app.
- "output_dir": "str", # Optional. An optional path to
- where the built assets will be located, relative to the build
- context. If not set, App Platform will automatically scan for these
- directory names: ``_static``"" , ``dist``"" , ``public``"" ,
- ``build``.
- "routes": [
- {
- "path": "str", # Optional.
- (Deprecated - Use Ingress Rules instead). An HTTP path
- prefix. Paths must start with / and must be unique across all
- components within an app.
- "preserve_path_prefix": bool #
- Optional. An optional flag to preserve the path that is
- forwarded to the backend service. By default, the HTTP
- request path will be trimmed from the left when forwarded to
- the component. For example, a component with ``path=/api``
- will have requests to ``/api/list`` trimmed to ``/list``. If
- this value is ``true``"" , the path will remain
- ``/api/list``.
- }
- ],
- "run_command": "str", # Optional. An optional run
- command to override the component's default.
- "source_dir": "str" # Optional. An optional path to
- the working directory to use for the build. For Dockerfile builds,
- this will be used as the build context. Must be relative to the root
- of the repo.
- }
- ],
- "vpc": {
- "egress_ips": [
- {
- "ip": "str" # Optional. The egress ips
- associated with the VPC.
- }
- ],
- "id": "str" # Optional. The ID of the VPC.
- },
- "workers": [
- {
- "autoscaling": {
- "max_instance_count": 0, # Optional. The
- maximum amount of instances for this component. Must be more than
- min_instance_count.
- "metrics": {
- "cpu": {
- "percent": 80 # Optional.
- Default value is 80. The average target CPU utilization
- for the component.
+ ],
+ "static_sites": [
+ {
+ "bitbucket": {
+ "branch": "str", #
+ Optional. The name of the branch to use.
+ "deploy_on_push":
+ bool, # Optional. Whether to automatically deploy
+ new commits made to the repo.
+ "repo": "str" #
+ Optional. The name of the repo in the format
+ owner/repo. Example: ``digitalocean/sample-golang``.
+ },
+ "build_command": "str", #
+ Optional. An optional build command to run while building
+ this component from source.
+ "catchall_document": "str",
+ # Optional. The name of the document to use as the
+ fallback for any requests to documents that are not found
+ when serving this static site. Only 1 of
+ ``catchall_document`` or ``error_document`` can be set.
+ "cors": {
+ "allow_credentials":
+ bool, # Optional. Whether browsers should expose the
+ response to the client-side JavaScript code when the
+                      request's credentials mode is include. This
+ configures the ``Access-Control-Allow-Credentials``
+ header.
+ "allow_headers": [
+ "str" #
+ Optional. The set of allowed HTTP request
+ headers. This configures the
+ ``Access-Control-Allow-Headers`` header.
+ ],
+ "allow_methods": [
+ "str" #
+ Optional. The set of allowed HTTP methods. This
+ configures the ``Access-Control-Allow-Methods``
+ header.
+ ],
+ "allow_origins": [
+ {
+                                  "exact": "str", # Optional. Exact string
+                                  match. Only 1 of ``exact``, ``prefix``,
+                                  or ``regex`` must be set.
+                                  "prefix": "str", # Optional. Prefix-based
+                                  match. Only 1 of ``exact``, ``prefix``,
+                                  or ``regex`` must be set.
+                                  "regex": "str" # Optional. RE2 style
+                                  regex-based match. Only 1 of ``exact``,
+                                  ``prefix``, or ``regex`` must be set. For
+                                  more information about RE2 syntax, see:
+                                  https://github.com/google/re2/wiki/Syntax.
+ }
+ ],
+ "expose_headers": [
+ "str" #
+ Optional. The set of HTTP response headers that
+ browsers are allowed to access. This configures
+ the ``Access-Control-Expose-Headers`` header.
+ ],
+ "max_age": "str" #
+ Optional. An optional duration specifying how long
+ browsers can cache the results of a preflight
+ request. This configures the
+ ``Access-Control-Max-Age`` header.
+ },
+ "dockerfile_path": "str", #
+ Optional. The path to the Dockerfile relative to the root
+ of the repo. If set, it will be used to build this
+ component. Otherwise, App Platform will attempt to build
+ it using buildpacks.
+ "environment_slug": "str", #
+ Optional. An environment slug describing the type of this
+ app. For a full list, please refer to `the product
+ documentation
+ `_.
+ "envs": [
+ {
+ "key": "str",
+ # The variable name. Required.
+ "scope":
+ "RUN_AND_BUILD_TIME", # Optional. Default value
+ is "RUN_AND_BUILD_TIME". * RUN_TIME: Made
+ available only at run-time * BUILD_TIME: Made
+ available only at build-time *
+ RUN_AND_BUILD_TIME: Made available at both build
+ and run-time. Known values are: "UNSET",
+ "RUN_TIME", "BUILD_TIME", and
+ "RUN_AND_BUILD_TIME".
+ "type":
+ "GENERAL", # Optional. Default value is
+ "GENERAL". * GENERAL: A plain-text environment
+ variable * SECRET: A secret encrypted environment
+ variable. Known values are: "GENERAL" and
+ "SECRET".
+ "value":
+ "str" # Optional. The value. If the type is
+                              ``SECRET``, the value will be encrypted on
+ first submission. On following submissions, the
+ encrypted value should be used.
+ }
+ ],
+ "error_document": "404.html",
+ # Optional. Default value is "404.html". The name of the
+ error document to use when serving this static site.
+ Default: 404.html. If no such file exists within the
+ built assets, App Platform will supply one.
+ "git": {
+ "branch": "str", #
+ Optional. The name of the branch to use.
+ "repo_clone_url":
+ "str" # Optional. The clone URL of the repo.
+ Example:
+ ``https://github.com/digitalocean/sample-golang.git``.
+ },
+ "github": {
+ "branch": "str", #
+ Optional. The name of the branch to use.
+ "deploy_on_push":
+ bool, # Optional. Whether to automatically deploy
+ new commits made to the repo.
+ "repo": "str" #
+ Optional. The name of the repo in the format
+ owner/repo. Example: ``digitalocean/sample-golang``.
+ },
+ "gitlab": {
+ "branch": "str", #
+ Optional. The name of the branch to use.
+ "deploy_on_push":
+ bool, # Optional. Whether to automatically deploy
+ new commits made to the repo.
+ "repo": "str" #
+ Optional. The name of the repo in the format
+ owner/repo. Example: ``digitalocean/sample-golang``.
+ },
+ "image": {
+ "deploy_on_push": {
+ "enabled":
+ bool # Optional. Whether to automatically deploy
+ new images. Can only be used for images hosted in
+ DOCR and can only be used with an image tag, not
+ a specific digest.
+ },
+ "digest": "str", #
+ Optional. The image digest. Cannot be specified if
+ tag is provided.
+ "registry": "str", #
+ Optional. The registry name. Must be left empty for
+ the ``DOCR`` registry type.
+ "registry_credentials": "str", # Optional. The
+ credentials to be able to pull the image. The value
+ will be encrypted on first submission. On following
+ submissions, the encrypted value should be used. *
+ "$username:$access_token" for registries of type
+ ``DOCKER_HUB``. * "$username:$access_token" for
+ registries of type ``GHCR``.
+ "registry_type":
+ "str", # Optional. * DOCKER_HUB: The DockerHub
+ container registry type. * DOCR: The DigitalOcean
+ container registry type. * GHCR: The Github container
+ registry type. Known values are: "DOCKER_HUB",
+ "DOCR", and "GHCR".
+ "repository": "str",
+ # Optional. The repository name.
+ "tag": "latest" #
+ Optional. Default value is "latest". The repository
+ tag. Defaults to ``latest`` if not provided and no
+ digest is provided. Cannot be specified if digest is
+ provided.
+ },
+ "index_document":
+ "index.html", # Optional. Default value is "index.html".
+ The name of the index document to use when serving this
+ static site. Default: index.html.
+ "log_destinations": [
+ {
+ "name":
+ "str", # Required.
+ "datadog": {
+ "api_key": "str", # Datadog API key.
+ Required.
+ "endpoint": "str" # Optional. Datadog HTTP
+ log intake endpoint.
+ },
+ "logtail": {
+ "token": "str" # Optional. Logtail token.
+ },
+ "open_search": {
+ "basic_auth": {
+ "password": "str", # Optional. Password
+ for user defined in User. Is required
+ when ``endpoint`` is set. Cannot be set
+ if using a DigitalOcean DBaaS OpenSearch
+ cluster.
+ "user": "str" # Optional. Username to
+ authenticate with. Only required when
+ ``endpoint`` is set. Defaults to
+ ``doadmin`` when ``cluster_name`` is set.
+ },
+ "cluster_name": "str", # Optional. The name
+ of a DigitalOcean DBaaS OpenSearch cluster to
+ use as a log forwarding destination. Cannot
+ be specified if ``endpoint`` is also
+ specified.
+ "endpoint": "str", # Optional. OpenSearch
+ API Endpoint. Only HTTPS is supported.
+ Format:
+                          https://<host>:<port>. Cannot
+ be specified if ``cluster_name`` is also
+ specified.
+ "index_name": "logs" # Optional. Default
+ value is "logs". The index name to use for
+ the logs. If not set, the default index name
+ is "logs".
+ },
+ "papertrail":
+ {
+ "endpoint": "str" # Papertrail syslog
+ endpoint. Required.
+ }
+ }
+ ],
+ "name": "str", # Optional.
+ The name. Must be unique across all components within the
+ same app.
+ "output_dir": "str", #
+ Optional. An optional path to where the built assets will
+ be located, relative to the build context. If not set,
+ App Platform will automatically scan for these directory
+                          names: ``_static``, ``dist``, ``public``,
+ ``build``.
+ "routes": [
+ {
+ "path":
+ "str", # Optional. (Deprecated - Use Ingress
+ Rules instead). An HTTP path prefix. Paths must
+ start with / and must be unique across all
+ components within an app.
+ "preserve_path_prefix": bool # Optional. An
+ optional flag to preserve the path that is
+ forwarded to the backend service. By default, the
+ HTTP request path will be trimmed from the left
+ when forwarded to the component. For example, a
+ component with ``path=/api`` will have requests
+ to ``/api/list`` trimmed to ``/list``. If this
+                              value is ``true``, the path will remain
+ ``/api/list``.
+ }
+ ],
+ "run_command": "str", #
+ Optional. An optional run command to override the
+ component's default.
+ "source_dir": "str" #
+ Optional. An optional path to the working directory to
+ use for the build. For Dockerfile builds, this will be
+ used as the build context. Must be relative to the root
+ of the repo.
}
+ ],
+ "vpc": {
+ "egress_ips": [
+ {
+ "ip": "str" #
+ Optional. The egress ips associated with the VPC.
+ }
+ ],
+ "id": "str" # Optional. The ID of
+ the VPC.
},
- "min_instance_count": 0 # Optional. The
- minimum amount of instances for this component. Must be less than
- max_instance_count.
- },
- "bitbucket": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "deploy_on_push": bool, # Optional. Whether
- to automatically deploy new commits made to the repo.
- "repo": "str" # Optional. The name of the
- repo in the format owner/repo. Example:
- ``digitalocean/sample-golang``.
+ "workers": [
+ {
+ "autoscaling": {
+ "max_instance_count":
+ 0, # Optional. The maximum amount of instances for
+ this component. Must be more than min_instance_count.
+ "metrics": {
+ "cpu": {
+ "percent": 80 # Optional. Default value is
+ 80. The average target CPU utilization for
+ the component.
+ }
+ },
+ "min_instance_count":
+ 0 # Optional. The minimum amount of instances for
+ this component. Must be less than max_instance_count.
+ },
+ "bitbucket": {
+ "branch": "str", #
+ Optional. The name of the branch to use.
+ "deploy_on_push":
+ bool, # Optional. Whether to automatically deploy
+ new commits made to the repo.
+ "repo": "str" #
+ Optional. The name of the repo in the format
+ owner/repo. Example: ``digitalocean/sample-golang``.
+ },
+ "build_command": "str", #
+ Optional. An optional build command to run while building
+ this component from source.
+ "dockerfile_path": "str", #
+ Optional. The path to the Dockerfile relative to the root
+ of the repo. If set, it will be used to build this
+ component. Otherwise, App Platform will attempt to build
+ it using buildpacks.
+ "environment_slug": "str", #
+ Optional. An environment slug describing the type of this
+ app. For a full list, please refer to `the product
+ documentation
+ `_.
+ "envs": [
+ {
+ "key": "str",
+ # The variable name. Required.
+ "scope":
+ "RUN_AND_BUILD_TIME", # Optional. Default value
+ is "RUN_AND_BUILD_TIME". * RUN_TIME: Made
+ available only at run-time * BUILD_TIME: Made
+ available only at build-time *
+ RUN_AND_BUILD_TIME: Made available at both build
+ and run-time. Known values are: "UNSET",
+ "RUN_TIME", "BUILD_TIME", and
+ "RUN_AND_BUILD_TIME".
+ "type":
+ "GENERAL", # Optional. Default value is
+ "GENERAL". * GENERAL: A plain-text environment
+ variable * SECRET: A secret encrypted environment
+ variable. Known values are: "GENERAL" and
+ "SECRET".
+ "value":
+ "str" # Optional. The value. If the type is
+                              ``SECRET``, the value will be encrypted on
+ first submission. On following submissions, the
+ encrypted value should be used.
+ }
+ ],
+ "git": {
+ "branch": "str", #
+ Optional. The name of the branch to use.
+ "repo_clone_url":
+ "str" # Optional. The clone URL of the repo.
+ Example:
+ ``https://github.com/digitalocean/sample-golang.git``.
+ },
+ "github": {
+ "branch": "str", #
+ Optional. The name of the branch to use.
+ "deploy_on_push":
+ bool, # Optional. Whether to automatically deploy
+ new commits made to the repo.
+ "repo": "str" #
+ Optional. The name of the repo in the format
+ owner/repo. Example: ``digitalocean/sample-golang``.
+ },
+ "gitlab": {
+ "branch": "str", #
+ Optional. The name of the branch to use.
+ "deploy_on_push":
+ bool, # Optional. Whether to automatically deploy
+ new commits made to the repo.
+ "repo": "str" #
+ Optional. The name of the repo in the format
+ owner/repo. Example: ``digitalocean/sample-golang``.
+ },
+ "image": {
+ "deploy_on_push": {
+ "enabled":
+ bool # Optional. Whether to automatically deploy
+ new images. Can only be used for images hosted in
+ DOCR and can only be used with an image tag, not
+ a specific digest.
+ },
+ "digest": "str", #
+ Optional. The image digest. Cannot be specified if
+ tag is provided.
+ "registry": "str", #
+ Optional. The registry name. Must be left empty for
+ the ``DOCR`` registry type.
+ "registry_credentials": "str", # Optional. The
+ credentials to be able to pull the image. The value
+ will be encrypted on first submission. On following
+ submissions, the encrypted value should be used. *
+ "$username:$access_token" for registries of type
+ ``DOCKER_HUB``. * "$username:$access_token" for
+ registries of type ``GHCR``.
+ "registry_type":
+ "str", # Optional. * DOCKER_HUB: The DockerHub
+ container registry type. * DOCR: The DigitalOcean
+ container registry type. * GHCR: The Github container
+ registry type. Known values are: "DOCKER_HUB",
+ "DOCR", and "GHCR".
+ "repository": "str",
+ # Optional. The repository name.
+ "tag": "latest" #
+ Optional. Default value is "latest". The repository
+ tag. Defaults to ``latest`` if not provided and no
+ digest is provided. Cannot be specified if digest is
+ provided.
+ },
+ "instance_count": 1, #
+ Optional. Default value is 1. The amount of instances
+ that this component should be scaled to. Default: 1. Must
+ not be set if autoscaling is used.
+ "instance_size_slug": {},
+ "liveness_health_check": {
+ "failure_threshold":
+ 0, # Optional. The number of failed health checks
+ before considered unhealthy.
+ "http_path": "str",
+ # Optional. The route path used for the HTTP health
+ check ping. If not set, the HTTP health check will be
+ disabled and a TCP health check used instead.
+ "initial_delay_seconds": 0, # Optional. The number
+ of seconds to wait before beginning health checks.
+ "period_seconds": 0,
+ # Optional. The number of seconds to wait between
+ health checks.
+ "port": 0, #
+ Optional. The port on which the health check will be
+ performed.
+ "success_threshold":
+ 0, # Optional. The number of successful health
+ checks before considered healthy.
+ "timeout_seconds": 0
+ # Optional. The number of seconds after which the
+ check times out.
+ },
+ "log_destinations": [
+ {
+ "name":
+ "str", # Required.
+ "datadog": {
+ "api_key": "str", # Datadog API key.
+ Required.
+ "endpoint": "str" # Optional. Datadog HTTP
+ log intake endpoint.
+ },
+ "logtail": {
+ "token": "str" # Optional. Logtail token.
+ },
+ "open_search": {
+ "basic_auth": {
+ "password": "str", # Optional. Password
+ for user defined in User. Is required
+ when ``endpoint`` is set. Cannot be set
+ if using a DigitalOcean DBaaS OpenSearch
+ cluster.
+ "user": "str" # Optional. Username to
+ authenticate with. Only required when
+ ``endpoint`` is set. Defaults to
+ ``doadmin`` when ``cluster_name`` is set.
+ },
+ "cluster_name": "str", # Optional. The name
+ of a DigitalOcean DBaaS OpenSearch cluster to
+ use as a log forwarding destination. Cannot
+ be specified if ``endpoint`` is also
+ specified.
+ "endpoint": "str", # Optional. OpenSearch
+ API Endpoint. Only HTTPS is supported.
+ Format:
+                          https://<host>:<port>. Cannot
+ be specified if ``cluster_name`` is also
+ specified.
+ "index_name": "logs" # Optional. Default
+ value is "logs". The index name to use for
+ the logs. If not set, the default index name
+ is "logs".
+ },
+ "papertrail":
+ {
+ "endpoint": "str" # Papertrail syslog
+ endpoint. Required.
+ }
+ }
+ ],
+ "name": "str", # Optional.
+ The name. Must be unique across all components within the
+ same app.
+ "run_command": "str", #
+ Optional. An optional run command to override the
+ component's default.
+ "source_dir": "str", #
+ Optional. An optional path to the working directory to
+ use for the build. For Dockerfile builds, this will be
+ used as the build context. Must be relative to the root
+ of the repo.
+ "termination": {
+ "grace_period_seconds": 0 # Optional. The number of
+ seconds to wait between sending a TERM signal to a
+ container and issuing a KILL which causes immediate
+ shutdown. (Default 120).
+ }
+ }
+ ]
},
- "build_command": "str", # Optional. An optional
- build command to run while building this component from source.
- "dockerfile_path": "str", # Optional. The path to
- the Dockerfile relative to the root of the repo. If set, it will be
- used to build this component. Otherwise, App Platform will attempt to
- build it using buildpacks.
- "environment_slug": "str", # Optional. An
- environment slug describing the type of this app. For a full list,
- please refer to `the product documentation
- `_.
- "envs": [
+ "static_sites": [
{
- "key": "str", # The variable name.
- Required.
- "scope": "RUN_AND_BUILD_TIME", #
- Optional. Default value is "RUN_AND_BUILD_TIME". * RUN_TIME:
- Made available only at run-time * BUILD_TIME: Made available
- only at build-time * RUN_AND_BUILD_TIME: Made available at
- both build and run-time. Known values are: "UNSET",
- "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME".
- "type": "GENERAL", # Optional.
- Default value is "GENERAL". * GENERAL: A plain-text
- environment variable * SECRET: A secret encrypted environment
- variable. Known values are: "GENERAL" and "SECRET".
- "value": "str" # Optional. The
- value. If the type is ``SECRET``"" , the value will be
- encrypted on first submission. On following submissions, the
- encrypted value should be used.
+ "name": "str", # Optional. The name
+ of this static site.
+ "source_commit_hash": "str" #
+ Optional. The commit hash of the repository that was used to
+ build this static site.
}
],
- "git": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "repo_clone_url": "str" # Optional. The
- clone URL of the repo. Example:
- ``https://github.com/digitalocean/sample-golang.git``.
- },
- "github": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "deploy_on_push": bool, # Optional. Whether
- to automatically deploy new commits made to the repo.
- "repo": "str" # Optional. The name of the
- repo in the format owner/repo. Example:
- ``digitalocean/sample-golang``.
- },
- "gitlab": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "deploy_on_push": bool, # Optional. Whether
- to automatically deploy new commits made to the repo.
- "repo": "str" # Optional. The name of the
- repo in the format owner/repo. Example:
- ``digitalocean/sample-golang``.
- },
- "image": {
- "deploy_on_push": {
- "enabled": bool # Optional. Whether
- to automatically deploy new images. Can only be used for
- images hosted in DOCR and can only be used with an image tag,
- not a specific digest.
- },
- "digest": "str", # Optional. The image
- digest. Cannot be specified if tag is provided.
- "registry": "str", # Optional. The registry
- name. Must be left empty for the ``DOCR`` registry type.
- "registry_credentials": "str", # Optional.
- The credentials to be able to pull the image. The value will be
- encrypted on first submission. On following submissions, the
- encrypted value should be used. * "$username:$access_token" for
- registries of type ``DOCKER_HUB``. * "$username:$access_token"
- for registries of type ``GHCR``.
- "registry_type": "str", # Optional. *
- DOCKER_HUB: The DockerHub container registry type. * DOCR: The
- DigitalOcean container registry type. * GHCR: The Github
- container registry type. Known values are: "DOCKER_HUB", "DOCR",
- and "GHCR".
- "repository": "str", # Optional. The
- repository name.
- "tag": "latest" # Optional. Default value is
- "latest". The repository tag. Defaults to ``latest`` if not
- provided and no digest is provided. Cannot be specified if digest
- is provided.
- },
- "instance_count": 1, # Optional. Default value is 1.
- The amount of instances that this component should be scaled to.
- Default: 1. Must not be set if autoscaling is used.
- "instance_size_slug": {},
- "liveness_health_check": {
- "failure_threshold": 0, # Optional. The
- number of failed health checks before considered unhealthy.
- "http_path": "str", # Optional. The route
- path used for the HTTP health check ping. If not set, the HTTP
- health check will be disabled and a TCP health check used
- instead.
- "initial_delay_seconds": 0, # Optional. The
- number of seconds to wait before beginning health checks.
- "period_seconds": 0, # Optional. The number
- of seconds to wait between health checks.
- "port": 0, # Optional. The port on which the
- health check will be performed.
- "success_threshold": 0, # Optional. The
- number of successful health checks before considered healthy.
- "timeout_seconds": 0 # Optional. The number
- of seconds after which the check times out.
- },
- "log_destinations": [
+ "tier_slug": "str", # Optional. The current pricing
+ tier slug of the deployment.
+ "updated_at": "2020-02-20 00:00:00", # Optional.
+ When the deployment was last updated.
+ "workers": [
{
- "name": "str", # Required.
- "datadog": {
- "api_key": "str", # Datadog
- API key. Required.
- "endpoint": "str" #
- Optional. Datadog HTTP log intake endpoint.
- },
- "logtail": {
- "token": "str" # Optional.
- Logtail token.
- },
- "open_search": {
- "basic_auth": {
- "password": "str", #
- Optional. Password for user defined in User. Is
- required when ``endpoint`` is set. Cannot be set if
- using a DigitalOcean DBaaS OpenSearch cluster.
- "user": "str" #
- Optional. Username to authenticate with. Only
- required when ``endpoint`` is set. Defaults to
- ``doadmin`` when ``cluster_name`` is set.
- },
- "cluster_name": "str", #
- Optional. The name of a DigitalOcean DBaaS OpenSearch
- cluster to use as a log forwarding destination. Cannot be
- specified if ``endpoint`` is also specified.
- "endpoint": "str", #
- Optional. OpenSearch API Endpoint. Only HTTPS is
- supported. Format: https://:code:``::code:``.
- Cannot be specified if ``cluster_name`` is also
- specified.
- "index_name": "logs" #
- Optional. Default value is "logs". The index name to use
- for the logs. If not set, the default index name is
- "logs".
- },
- "papertrail": {
- "endpoint": "str" #
- Papertrail syslog endpoint. Required.
- }
+ "name": "str", # Optional. The name
+ of this worker.
+ "source_commit_hash": "str" #
+ Optional. The commit hash of the repository that was used to
+ build this worker.
}
- ],
- "name": "str", # Optional. The name. Must be unique
- across all components within the same app.
- "run_command": "str", # Optional. An optional run
- command to override the component's default.
- "source_dir": "str", # Optional. An optional path to
- the working directory to use for the build. For Dockerfile builds,
- this will be used as the build context. Must be relative to the root
- of the repo.
- "termination": {
- "grace_period_seconds": 0 # Optional. The
- number of seconds to wait between sending a TERM signal to a
- container and issuing a KILL which causes immediate shutdown.
- (Default 120).
- }
- }
- ]
- },
- "app_id": "str" # Optional. An optional ID of an existing app. If set, the
- spec will be treated as a proposed update to the specified app. The existing app
- is not modified using this method.
+ ]
+ },
+ "deployment_id": "str", # Optional. For deployment events,
+ this is the same as the deployment's ID. For autoscaling events, this is
+ the deployment that was autoscaled.
+ "id": "str", # Optional. The ID of the event (UUID).
+ "type": "str" # Optional. The type of event. Known values
+ are: "UNKNOWN", "DEPLOYMENT", and "AUTOSCALING".
+ }
+ ],
+ "links": {
+ "pages": {}
+ }
+ }
+ # response body for status code(s): 404
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
}
+ """
+ error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ 401: cast(
+ Type[HttpResponseError],
+ lambda response: ClientAuthenticationError(response=response),
+ ),
+ 429: HttpResponseError,
+ 500: HttpResponseError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = kwargs.pop("params", {}) or {}
+
+ cls: ClsType[JSON] = kwargs.pop("cls", None)
+
+ _request = build_apps_list_events_request(
+ app_id=app_id,
+ page=page,
+ per_page=per_page,
+ event_types=event_types,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = (
+ self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 404]:
+ if _stream:
+ response.read() # Load the body in memory and close the socket
+ map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
+ raise HttpResponseError(response=response)
+
+ response_headers = {}
+ if response.status_code == 200:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if response.status_code == 404:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if cls:
+ return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
+
+ return cast(JSON, deserialized) # type: ignore
+
+ @distributed_trace
+ def get_event(self, app_id: str, event_id: str, **kwargs: Any) -> JSON:
+ # pylint: disable=line-too-long
+ """Get an Event.
+
+ Get a single event for an app.
+
+ :param app_id: The app ID. Required.
+ :type app_id: str
+ :param event_id: The event ID. Required.
+ :type event_id: str
+ :return: JSON object
+ :rtype: JSON
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
# response body for status code(s): 200
response == {
- "app_cost": 0, # Optional. The monthly cost of the proposed app in USD.
- "app_is_static": bool, # Optional. Indicates whether the app is a static
- app.
- "app_name_available": bool, # Optional. Indicates whether the app name is
- available.
- "app_name_suggestion": "str", # Optional. The suggested name if the proposed
- app name is unavailable.
- "app_tier_downgrade_cost": 0, # Optional. The monthly cost of the proposed
- app in USD using the previous pricing plan tier. For example, if you propose an
- app that uses the Professional tier, the ``app_tier_downgrade_cost`` field
- displays the monthly cost of the app if it were to use the Basic tier. If the
- proposed app already uses the lest expensive tier, the field is empty.
- "existing_static_apps": "str", # Optional. The maximum number of free static
- apps the account can have. We will charge you for any additional static apps.
- "spec": {
- "name": "str", # The name of the app. Must be unique across all apps
- in the same account. Required.
- "databases": [
- {
- "name": "str", # The database's name. The name must
- be unique across all components within the same app and cannot use
- capital letters. Required.
- "cluster_name": "str", # Optional. The name of the
- underlying DigitalOcean DBaaS cluster. This is required for
- production databases. For dev databases, if cluster_name is not set,
- a new cluster will be provisioned.
- "db_name": "str", # Optional. The name of the MySQL
- or PostgreSQL database to configure.
- "db_user": "str", # Optional. The name of the MySQL
- or PostgreSQL user to configure.
- "engine": "UNSET", # Optional. Default value is
- "UNSET". * MYSQL: MySQL * PG: PostgreSQL * REDIS: Caching * MONGODB:
- MongoDB * KAFKA: Kafka * OPENSEARCH: OpenSearch * VALKEY: ValKey.
- Known values are: "UNSET", "MYSQL", "PG", "REDIS", "MONGODB",
- "KAFKA", "OPENSEARCH", and "VALKEY".
- "production": bool, # Optional. Whether this is a
- production or dev database.
- "version": "str" # Optional. The version of the
- database engine.
- }
- ],
- "disable_edge_cache": False, # Optional. Default value is False. ..
- role:: raw-html-m2r(raw) :format: html If set to ``true``"" , the app
- will **not** be cached at the edge (CDN). Enable this option if you want to
- manage CDN configuration yourself"u2014whether by using an external CDN
- provider or by handling static content and caching within your app. This
- setting is also recommended for apps that require real-time data or serve
- dynamic content, such as those using Server-Sent Events (SSE) over GET, or
- hosting an MCP (Model Context Protocol) Server that utilizes SSE.""
- :raw-html-m2r:`
` **Note:** This feature is not available for static site
- components."" :raw-html-m2r:`
` For more information, see `Disable CDN
- Cache
- `_.
- "disable_email_obfuscation": False, # Optional. Default value is
- False. If set to ``true``"" , email addresses in the app will not be
- obfuscated. This is useful for apps that require email addresses to be
- visible (in the HTML markup).
- "domains": [
- {
- "domain": "str", # The hostname for the domain.
- Required.
- "minimum_tls_version": "str", # Optional. The
- minimum version of TLS a client application can use to access
- resources for the domain. Must be one of the following values
- wrapped within quotations: ``"1.2"`` or ``"1.3"``. Known values are:
- "1.2" and "1.3".
- "type": "UNSPECIFIED", # Optional. Default value is
- "UNSPECIFIED". * DEFAULT: The default ``.ondigitalocean.app`` domain
- assigned to this app * PRIMARY: The primary domain for this app that
- is displayed as the default in the control panel, used in bindable
- environment variables, and any other places that reference an app's
- live URL. Only one domain may be set as primary. * ALIAS: A
- non-primary domain. Known values are: "UNSPECIFIED", "DEFAULT",
- "PRIMARY", and "ALIAS".
- "wildcard": bool, # Optional. Indicates whether the
- domain includes all sub-domains, in addition to the given domain.
- "zone": "str" # Optional. Optional. If the domain
- uses DigitalOcean DNS and you would like App Platform to
- automatically manage it for you, set this to the name of the domain
- on your account. For example, If the domain you are adding is
- ``app.domain.com``"" , the zone could be ``domain.com``.
- }
- ],
- "egress": {
- "type": "AUTOASSIGN" # Optional. Default value is
- "AUTOASSIGN". The app egress type. Known values are: "AUTOASSIGN" and
- "DEDICATED_IP".
+ "event": {
+ "autoscaling": {
+ "components": {
+ "str": {
+ "from": 0, # Optional. The number of
+ replicas before scaling.
+ "to": 0, # Optional. The number of replicas
+ after scaling.
+ "triggering_metric": "str" # Optional. The
+ metric that triggered the scale change. Known values are "cpu",
+ "requests_per_second", "request_duration". For inactivity sleep,
+ "scale_from_zero" and "scale_to_zero" are used.
+ }
+ },
+ "phase": "str" # Optional. The current phase of the
+ autoscaling event. Known values are: "UNKNOWN", "PENDING", "IN_PROGRESS",
+ "SUCCEEDED", "FAILED", and "CANCELED".
},
- "enhanced_threat_control_enabled": False, # Optional. Default value
- is False. If set to ``true``"" , suspicious requests will go through
- additional security checks to help mitigate layer 7 DDoS attacks.
- "functions": [
- {
- "name": "str", # The name. Must be unique across all
- components within the same app. Required.
- "alerts": [
+ "created_at": "2020-02-20 00:00:00", # Optional. When the event was
+ created.
+ "deployment": {
+ "cause": "str", # Optional. What caused this deployment to
+ be created.
+ "cloned_from": "str", # Optional. The ID of a previous
+ deployment that this deployment was cloned from.
+ "created_at": "2020-02-20 00:00:00", # Optional. The
+ creation time of the deployment.
+ "functions": [
+ {
+ "name": "str", # Optional. The name of this
+ functions component.
+ "namespace": "str", # Optional. The
+ namespace where the functions are deployed.
+ "source_commit_hash": "str" # Optional. The
+ commit hash of the repository that was used to build this
+ functions component.
+ }
+ ],
+ "id": "str", # Optional. The ID of the deployment.
+ "jobs": [
+ {
+ "name": "str", # Optional. The name of this
+ job.
+ "source_commit_hash": "str" # Optional. The
+ commit hash of the repository that was used to build this job.
+ }
+ ],
+ "phase": "UNKNOWN", # Optional. Default value is "UNKNOWN".
+ Known values are: "UNKNOWN", "PENDING_BUILD", "BUILDING",
+ "PENDING_DEPLOY", "DEPLOYING", "ACTIVE", "SUPERSEDED", "ERROR", and
+ "CANCELED".
+ "phase_last_updated_at": "2020-02-20 00:00:00", # Optional.
+ When the deployment phase was last updated.
+ "progress": {
+ "error_steps": 0, # Optional. Number of unsuccessful
+ steps.
+ "pending_steps": 0, # Optional. Number of pending
+ steps.
+ "running_steps": 0, # Optional. Number of currently
+ running steps.
+ "steps": [
{
- "disabled": bool, # Optional. Is the
- alert disabled?.
- "operator": "UNSPECIFIED_OPERATOR",
- # Optional. Default value is "UNSPECIFIED_OPERATOR". Known
- values are: "UNSPECIFIED_OPERATOR", "GREATER_THAN", and
- "LESS_THAN".
- "rule": "UNSPECIFIED_RULE", #
- Optional. Default value is "UNSPECIFIED_RULE". Known values
- are: "UNSPECIFIED_RULE", "CPU_UTILIZATION",
- "MEM_UTILIZATION", "RESTART_COUNT", "DEPLOYMENT_FAILED",
- "DEPLOYMENT_LIVE", "DOMAIN_FAILED", "DOMAIN_LIVE",
- "AUTOSCALE_FAILED", "AUTOSCALE_SUCCEEDED",
- "FUNCTIONS_ACTIVATION_COUNT",
- "FUNCTIONS_AVERAGE_DURATION_MS",
- "FUNCTIONS_ERROR_RATE_PER_MINUTE",
- "FUNCTIONS_AVERAGE_WAIT_TIME_MS", "FUNCTIONS_ERROR_COUNT",
- and "FUNCTIONS_GB_RATE_PER_SECOND".
- "value": 0.0, # Optional. Threshold
- value for alert.
- "window": "UNSPECIFIED_WINDOW" #
- Optional. Default value is "UNSPECIFIED_WINDOW". Known values
- are: "UNSPECIFIED_WINDOW", "FIVE_MINUTES", "TEN_MINUTES",
- "THIRTY_MINUTES", and "ONE_HOUR".
+ "component_name": "str", # Optional.
+ The component name that this step is associated with.
+ "ended_at": "2020-02-20 00:00:00", #
+ Optional. The end time of this step.
+ "message_base": "str", # Optional.
+ The base of a human-readable description of the step intended
+ to be combined with the component name for presentation. For
+ example: ``message_base`` = "Building service"
+ ``component_name`` = "api".
+ "name": "str", # Optional. The name
+ of this step.
+ "reason": {
+ "code": "str", # Optional.
+ The error code.
+ "message": "str" # Optional.
+ The error message.
+ },
+ "started_at": "2020-02-20 00:00:00",
+ # Optional. The start time of this step.
+ "status": "UNKNOWN", # Optional.
+ Default value is "UNKNOWN". Known values are: "UNKNOWN",
+ "PENDING", "RUNNING", "ERROR", and "SUCCESS".
+ "steps": [
+ {} # Optional. Child steps
+ of this step.
+ ]
}
],
- "bitbucket": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "deploy_on_push": bool, # Optional. Whether
- to automatically deploy new commits made to the repo.
- "repo": "str" # Optional. The name of the
- repo in the format owner/repo. Example:
- ``digitalocean/sample-golang``.
- },
- "cors": {
- "allow_credentials": bool, # Optional.
- Whether browsers should expose the response to the client-side
- JavaScript code when the request"u2019s credentials mode is
- include. This configures the ``Access-Control-Allow-Credentials``
- header.
- "allow_headers": [
- "str" # Optional. The set of allowed
- HTTP request headers. This configures the
- ``Access-Control-Allow-Headers`` header.
- ],
- "allow_methods": [
- "str" # Optional. The set of allowed
- HTTP methods. This configures the
- ``Access-Control-Allow-Methods`` header.
- ],
- "allow_origins": [
- {
- "exact": "str", # Optional.
- Exact string match. Only 1 of ``exact``"" , ``prefix``""
- , or ``regex`` must be set.
- "prefix": "str", # Optional.
- Prefix-based match. Only 1 of ``exact``"" , ``prefix``""
- , or ``regex`` must be set.
- "regex": "str" # Optional.
- RE2 style regex-based match. Only 1 of ``exact``"" ,
- ``prefix``"" , or ``regex`` must be set. For more
- information about RE2 syntax, see:
- https://github.com/google/re2/wiki/Syntax.
- }
- ],
- "expose_headers": [
- "str" # Optional. The set of HTTP
- response headers that browsers are allowed to access. This
- configures the ``Access-Control-Expose-Headers`` header.
- ],
- "max_age": "str" # Optional. An optional
- duration specifying how long browsers can cache the results of a
- preflight request. This configures the ``Access-Control-Max-Age``
- header.
- },
- "envs": [
+ "success_steps": 0, # Optional. Number of successful
+ steps.
+ "summary_steps": [
{
- "key": "str", # The variable name.
- Required.
- "scope": "RUN_AND_BUILD_TIME", #
- Optional. Default value is "RUN_AND_BUILD_TIME". * RUN_TIME:
- Made available only at run-time * BUILD_TIME: Made available
- only at build-time * RUN_AND_BUILD_TIME: Made available at
- both build and run-time. Known values are: "UNSET",
- "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME".
- "type": "GENERAL", # Optional.
- Default value is "GENERAL". * GENERAL: A plain-text
- environment variable * SECRET: A secret encrypted environment
- variable. Known values are: "GENERAL" and "SECRET".
- "value": "str" # Optional. The
- value. If the type is ``SECRET``"" , the value will be
- encrypted on first submission. On following submissions, the
- encrypted value should be used.
+ "component_name": "str", # Optional.
+ The component name that this step is associated with.
+ "ended_at": "2020-02-20 00:00:00", #
+ Optional. The end time of this step.
+ "message_base": "str", # Optional.
+ The base of a human-readable description of the step intended
+ to be combined with the component name for presentation. For
+ example: ``message_base`` = "Building service"
+ ``component_name`` = "api".
+ "name": "str", # Optional. The name
+ of this step.
+ "reason": {
+ "code": "str", # Optional.
+ The error code.
+ "message": "str" # Optional.
+ The error message.
+ },
+ "started_at": "2020-02-20 00:00:00",
+ # Optional. The start time of this step.
+ "status": "UNKNOWN", # Optional.
+ Default value is "UNKNOWN". Known values are: "UNKNOWN",
+ "PENDING", "RUNNING", "ERROR", and "SUCCESS".
+ "steps": [
+ {} # Optional. Child steps
+ of this step.
+ ]
}
],
- "git": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "repo_clone_url": "str" # Optional. The
- clone URL of the repo. Example:
- ``https://github.com/digitalocean/sample-golang.git``.
- },
- "github": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "deploy_on_push": bool, # Optional. Whether
- to automatically deploy new commits made to the repo.
- "repo": "str" # Optional. The name of the
- repo in the format owner/repo. Example:
- ``digitalocean/sample-golang``.
- },
- "gitlab": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "deploy_on_push": bool, # Optional. Whether
- to automatically deploy new commits made to the repo.
- "repo": "str" # Optional. The name of the
- repo in the format owner/repo. Example:
- ``digitalocean/sample-golang``.
- },
- "log_destinations": [
+ "total_steps": 0 # Optional. Total number of steps.
+ },
+ "services": [
+ {
+ "name": "str", # Optional. The name of this
+ service.
+ "source_commit_hash": "str" # Optional. The
+ commit hash of the repository that was used to build this
+ service.
+ }
+ ],
+ "spec": {
+ "name": "str", # The name of the app. Must be unique
+ across all apps in the same account. Required.
+ "databases": [
{
- "name": "str", # Required.
- "datadog": {
- "api_key": "str", # Datadog
- API key. Required.
- "endpoint": "str" #
- Optional. Datadog HTTP log intake endpoint.
- },
- "logtail": {
- "token": "str" # Optional.
- Logtail token.
- },
- "open_search": {
- "basic_auth": {
- "password": "str", #
- Optional. Password for user defined in User. Is
- required when ``endpoint`` is set. Cannot be set if
- using a DigitalOcean DBaaS OpenSearch cluster.
- "user": "str" #
- Optional. Username to authenticate with. Only
- required when ``endpoint`` is set. Defaults to
- ``doadmin`` when ``cluster_name`` is set.
- },
- "cluster_name": "str", #
- Optional. The name of a DigitalOcean DBaaS OpenSearch
- cluster to use as a log forwarding destination. Cannot be
- specified if ``endpoint`` is also specified.
- "endpoint": "str", #
- Optional. OpenSearch API Endpoint. Only HTTPS is
- supported. Format: https://:code:``::code:``.
- Cannot be specified if ``cluster_name`` is also
- specified.
- "index_name": "logs" #
- Optional. Default value is "logs". The index name to use
- for the logs. If not set, the default index name is
- "logs".
- },
- "papertrail": {
- "endpoint": "str" #
- Papertrail syslog endpoint. Required.
- }
+ "name": "str", # The database's
+ name. The name must be unique across all components within
+ the same app and cannot use capital letters. Required.
+ "cluster_name": "str", # Optional.
+ The name of the underlying DigitalOcean DBaaS cluster. This
+ is required for production databases. For dev databases, if
+ cluster_name is not set, a new cluster will be provisioned.
+ "db_name": "str", # Optional. The
+ name of the MySQL or PostgreSQL database to configure.
+ "db_user": "str", # Optional. The
+ name of the MySQL or PostgreSQL user to configure.
+ "engine": "UNSET", # Optional.
+ Default value is "UNSET". * MYSQL: MySQL * PG: PostgreSQL *
+ REDIS: Caching * MONGODB: MongoDB * KAFKA: Kafka *
+ OPENSEARCH: OpenSearch * VALKEY: ValKey. Known values are:
+ "UNSET", "MYSQL", "PG", "REDIS", "MONGODB", "KAFKA",
+ "OPENSEARCH", and "VALKEY".
+ "production": bool, # Optional.
+ Whether this is a production or dev database.
+ "version": "str" # Optional. The
+ version of the database engine.
}
],
- "routes": [
+ "disable_edge_cache": False, # Optional. Default
+ value is False. .. role:: raw-html-m2r(raw) :format: html If set
+ to ``true``"" , the app will **not** be cached at the edge (CDN).
+ Enable this option if you want to manage CDN configuration
+ yourself"u2014whether by using an external CDN provider or by
+ handling static content and caching within your app. This setting is
+ also recommended for apps that require real-time data or serve
+ dynamic content, such as those using Server-Sent Events (SSE) over
+ GET, or hosting an MCP (Model Context Protocol) Server that utilizes
+ SSE."" :raw-html-m2r:`
` **Note:** This feature is not available
+ for static site components."" :raw-html-m2r:`
` For more
+ information, see `Disable CDN Cache
+ `_.
+ "disable_email_obfuscation": False, # Optional.
+ Default value is False. If set to ``true``"" , email addresses in the
+ app will not be obfuscated. This is useful for apps that require
+ email addresses to be visible (in the HTML markup).
+ "domains": [
{
- "path": "str", # Optional.
- (Deprecated - Use Ingress Rules instead). An HTTP path
- prefix. Paths must start with / and must be unique across all
- components within an app.
- "preserve_path_prefix": bool #
- Optional. An optional flag to preserve the path that is
- forwarded to the backend service. By default, the HTTP
- request path will be trimmed from the left when forwarded to
- the component. For example, a component with ``path=/api``
- will have requests to ``/api/list`` trimmed to ``/list``. If
- this value is ``true``"" , the path will remain
- ``/api/list``.
+ "domain": "str", # The hostname for
+ the domain. Required.
+ "minimum_tls_version": "str", #
+ Optional. The minimum version of TLS a client application can
+ use to access resources for the domain. Must be one of the
+ following values wrapped within quotations: ``"1.2"`` or
+ ``"1.3"``. Known values are: "1.2" and "1.3".
+ "type": "UNSPECIFIED", # Optional.
+ Default value is "UNSPECIFIED". * DEFAULT: The default
+ ``.ondigitalocean.app`` domain assigned to this app *
+ PRIMARY: The primary domain for this app that is displayed as
+ the default in the control panel, used in bindable
+ environment variables, and any other places that reference an
+ app's live URL. Only one domain may be set as primary. *
+ ALIAS: A non-primary domain. Known values are: "UNSPECIFIED",
+ "DEFAULT", "PRIMARY", and "ALIAS".
+ "wildcard": bool, # Optional.
+ Indicates whether the domain includes all sub-domains, in
+ addition to the given domain.
+ "zone": "str" # Optional. Optional.
+ If the domain uses DigitalOcean DNS and you would like App
+ Platform to automatically manage it for you, set this to the
+ name of the domain on your account. For example, If the
+ domain you are adding is ``app.domain.com``"" , the zone
+ could be ``domain.com``.
}
],
- "source_dir": "str" # Optional. An optional path to
- the working directory to use for the build. For Dockerfile builds,
- this will be used as the build context. Must be relative to the root
- of the repo.
- }
- ],
- "ingress": {
- "rules": [
- {
- "component": {
- "name": "str", # The name of the
- component to route to. Required.
- "preserve_path_prefix": "str", #
- Optional. An optional flag to preserve the path that is
- forwarded to the backend service. By default, the HTTP
- request path will be trimmed from the left when forwarded to
- the component. For example, a component with ``path=/api``
- will have requests to ``/api/list`` trimmed to ``/list``. If
- this value is ``true``"" , the path will remain
- ``/api/list``. Note: this is not applicable for Functions
- Components and is mutually exclusive with ``rewrite``.
- "rewrite": "str" # Optional. An
- optional field that will rewrite the path of the component to
- be what is specified here. By default, the HTTP request path
- will be trimmed from the left when forwarded to the
- component. For example, a component with ``path=/api`` will
- have requests to ``/api/list`` trimmed to ``/list``. If you
- specified the rewrite to be ``/v1/``"" , requests to
- ``/api/list`` would be rewritten to ``/v1/list``. Note: this
- is mutually exclusive with ``preserve_path_prefix``.
- },
- "cors": {
- "allow_credentials": bool, #
- Optional. Whether browsers should expose the response to the
- client-side JavaScript code when the request"u2019s
- credentials mode is include. This configures the
- ``Access-Control-Allow-Credentials`` header.
- "allow_headers": [
- "str" # Optional. The set of
- allowed HTTP request headers. This configures the
- ``Access-Control-Allow-Headers`` header.
+ "egress": {
+ "type": "AUTOASSIGN" # Optional. Default
+ value is "AUTOASSIGN". The app egress type. Known values are:
+ "AUTOASSIGN" and "DEDICATED_IP".
+ },
+                        "enhanced_threat_control_enabled": False,  #
+ Optional. Default value is False. If set to ``true``, suspicious
+ requests will go through additional security checks to help mitigate
+ layer 7 DDoS attacks.
+ "functions": [
+ {
+ "name": "str", # The name. Must be
+ unique across all components within the same app. Required.
+ "alerts": [
+ {
+ "disabled": bool, #
+ Optional. Is the alert disabled?.
+ "operator":
+ "UNSPECIFIED_OPERATOR", # Optional. Default value is
+ "UNSPECIFIED_OPERATOR". Known values are:
+ "UNSPECIFIED_OPERATOR", "GREATER_THAN", and
+ "LESS_THAN".
+ "rule":
+ "UNSPECIFIED_RULE", # Optional. Default value is
+ "UNSPECIFIED_RULE". Known values are:
+ "UNSPECIFIED_RULE", "CPU_UTILIZATION",
+ "MEM_UTILIZATION", "RESTART_COUNT",
+ "DEPLOYMENT_FAILED", "DEPLOYMENT_LIVE",
+ "DOMAIN_FAILED", "DOMAIN_LIVE", "AUTOSCALE_FAILED",
+ "AUTOSCALE_SUCCEEDED", "FUNCTIONS_ACTIVATION_COUNT",
+ "FUNCTIONS_AVERAGE_DURATION_MS",
+ "FUNCTIONS_ERROR_RATE_PER_MINUTE",
+ "FUNCTIONS_AVERAGE_WAIT_TIME_MS",
+ "FUNCTIONS_ERROR_COUNT", and
+ "FUNCTIONS_GB_RATE_PER_SECOND".
+ "value": 0.0, #
+ Optional. Threshold value for alert.
+ "window":
+ "UNSPECIFIED_WINDOW" # Optional. Default value is
+ "UNSPECIFIED_WINDOW". Known values are:
+ "UNSPECIFIED_WINDOW", "FIVE_MINUTES", "TEN_MINUTES",
+ "THIRTY_MINUTES", and "ONE_HOUR".
+ }
],
- "allow_methods": [
- "str" # Optional. The set of
- allowed HTTP methods. This configures the
- ``Access-Control-Allow-Methods`` header.
+ "bitbucket": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "deploy_on_push": bool, #
+ Optional. Whether to automatically deploy new commits
+ made to the repo.
+ "repo": "str" # Optional.
+ The name of the repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
+ },
+ "cors": {
+                                "allow_credentials": bool,  #
+ Optional. Whether browsers should expose the response to
+ the client-side JavaScript code when the request's
+ credentials mode is include. This configures the
+ ``Access-Control-Allow-Credentials`` header.
+ "allow_headers": [
+ "str" # Optional.
+ The set of allowed HTTP request headers. This
+ configures the ``Access-Control-Allow-Headers``
+ header.
+ ],
+ "allow_methods": [
+ "str" # Optional.
+ The set of allowed HTTP methods. This configures the
+ ``Access-Control-Allow-Methods`` header.
+ ],
+ "allow_origins": [
+ {
+                                            "exact":
+ "str",  # Optional. Exact string match. Only 1 of
+ ``exact``, ``prefix``, or ``regex`` must be
+ set.
+                                            "prefix":
+ "str",  # Optional. Prefix-based match. Only 1 of
+ ``exact``, ``prefix``, or ``regex`` must be
+ set.
+                                            "regex":
+ "str"  # Optional. RE2 style regex-based match.
+ Only 1 of ``exact``, ``prefix``, or
+ ``regex`` must be set. For more information about
+ RE2 syntax, see:
+ https://github.com/google/re2/wiki/Syntax.
+ }
+ ],
+ "expose_headers": [
+ "str" # Optional.
+ The set of HTTP response headers that browsers are
+ allowed to access. This configures the
+ ``Access-Control-Expose-Headers`` header.
+ ],
+ "max_age": "str" # Optional.
+ An optional duration specifying how long browsers can
+ cache the results of a preflight request. This configures
+ the ``Access-Control-Max-Age`` header.
+ },
+ "envs": [
+ {
+ "key": "str", # The
+ variable name. Required.
+ "scope":
+ "RUN_AND_BUILD_TIME", # Optional. Default value is
+ "RUN_AND_BUILD_TIME". * RUN_TIME: Made available only
+ at run-time * BUILD_TIME: Made available only at
+ build-time * RUN_AND_BUILD_TIME: Made available at
+ both build and run-time. Known values are: "UNSET",
+ "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME".
+ "type": "GENERAL", #
+ Optional. Default value is "GENERAL". * GENERAL: A
+ plain-text environment variable * SECRET: A secret
+ encrypted environment variable. Known values are:
+ "GENERAL" and "SECRET".
+                                        "value": "str"  #
+ Optional. The value. If the type is ``SECRET``,
+ the value will be encrypted on first submission. On
+ following submissions, the encrypted value should be
+ used.
+ }
],
- "allow_origins": [
+ "git": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "repo_clone_url": "str" #
+ Optional. The clone URL of the repo. Example:
+ ``https://github.com/digitalocean/sample-golang.git``.
+ },
+ "github": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "deploy_on_push": bool, #
+ Optional. Whether to automatically deploy new commits
+ made to the repo.
+ "repo": "str" # Optional.
+ The name of the repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
+ },
+ "gitlab": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "deploy_on_push": bool, #
+ Optional. Whether to automatically deploy new commits
+ made to the repo.
+ "repo": "str" # Optional.
+ The name of the repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
+ },
+ "log_destinations": [
{
- "exact": "str", #
- Optional. Exact string match. Only 1 of ``exact``"" ,
- ``prefix``"" , or ``regex`` must be set.
- "prefix": "str", #
- Optional. Prefix-based match. Only 1 of ``exact``"" ,
- ``prefix``"" , or ``regex`` must be set.
- "regex": "str" #
- Optional. RE2 style regex-based match. Only 1 of
- ``exact``"" , ``prefix``"" , or ``regex`` must be
- set. For more information about RE2 syntax, see:
- https://github.com/google/re2/wiki/Syntax.
+ "name": "str", #
+ Required.
+ "datadog": {
+ "api_key":
+ "str", # Datadog API key. Required.
+ "endpoint":
+ "str" # Optional. Datadog HTTP log intake
+ endpoint.
+ },
+ "logtail": {
+ "token":
+ "str" # Optional. Logtail token.
+ },
+ "open_search": {
+ "basic_auth":
+ {
+ "password": "str", # Optional. Password for
+ user defined in User. Is required when
+ ``endpoint`` is set. Cannot be set if using a
+ DigitalOcean DBaaS OpenSearch cluster.
+ "user": "str" # Optional. Username to
+ authenticate with. Only required when
+ ``endpoint`` is set. Defaults to ``doadmin``
+ when ``cluster_name`` is set.
+ },
+ "cluster_name": "str", # Optional. The name of a
+ DigitalOcean DBaaS OpenSearch cluster to use as a
+ log forwarding destination. Cannot be specified
+ if ``endpoint`` is also specified.
+                                        "endpoint":
+ "str",  # Optional. OpenSearch API Endpoint. Only
+ HTTPS is supported. Format:
+ ``https://<host>:<port>``. Cannot be
+ specified if ``cluster_name`` is also specified.
+ "index_name":
+ "logs" # Optional. Default value is "logs". The
+ index name to use for the logs. If not set, the
+ default index name is "logs".
+ },
+ "papertrail": {
+ "endpoint":
+ "str" # Papertrail syslog endpoint. Required.
+ }
}
],
- "expose_headers": [
- "str" # Optional. The set of
- HTTP response headers that browsers are allowed to
- access. This configures the
- ``Access-Control-Expose-Headers`` header.
+ "routes": [
+ {
+ "path": "str", #
+ Optional. (Deprecated - Use Ingress Rules instead).
+ An HTTP path prefix. Paths must start with / and must
+ be unique across all components within an app.
+                                        "preserve_path_prefix": bool  # Optional. An optional
+ flag to preserve the path that is forwarded to the
+ backend service. By default, the HTTP request path
+ will be trimmed from the left when forwarded to the
+ component. For example, a component with
+ ``path=/api`` will have requests to ``/api/list``
+ trimmed to ``/list``. If this value is ``true``,
+ the path will remain ``/api/list``.
+ }
],
- "max_age": "str" # Optional. An
- optional duration specifying how long browsers can cache the
- results of a preflight request. This configures the
- ``Access-Control-Max-Age`` header.
- },
- "match": {
- "authority": {
- "exact": "str" # Required.
- },
- "path": {
- "prefix": "str" #
- Prefix-based match. For example, ``/api`` will match
- ``/api``"" , ``/api/``"" , and any nested paths such as
- ``/api/v1/endpoint``. Required.
- }
- },
- "redirect": {
- "authority": "str", # Optional. The
- authority/host to redirect to. This can be a hostname or IP
- address. Note: use ``port`` to set the port.
- "port": 0, # Optional. The port to
- redirect to.
- "redirect_code": 0, # Optional. The
- redirect code to use. Defaults to ``302``. Supported values
- are 300, 301, 302, 303, 304, 307, 308.
- "scheme": "str", # Optional. The
- scheme to redirect to. Supported values are ``http`` or
- ``https``. Default: ``https``.
- "uri": "str" # Optional. An optional
- URI path to redirect to. Note: if this is specified the whole
- URI of the original request will be overwritten to this
- value, irrespective of the original request URI being
- matched.
- }
- }
- ]
- },
- "jobs": [
- {
- "autoscaling": {
- "max_instance_count": 0, # Optional. The
- maximum amount of instances for this component. Must be more than
- min_instance_count.
- "metrics": {
- "cpu": {
- "percent": 80 # Optional.
- Default value is 80. The average target CPU utilization
- for the component.
- }
- },
- "min_instance_count": 0 # Optional. The
- minimum amount of instances for this component. Must be less than
- max_instance_count.
- },
- "bitbucket": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "deploy_on_push": bool, # Optional. Whether
- to automatically deploy new commits made to the repo.
- "repo": "str" # Optional. The name of the
- repo in the format owner/repo. Example:
- ``digitalocean/sample-golang``.
- },
- "build_command": "str", # Optional. An optional
- build command to run while building this component from source.
- "dockerfile_path": "str", # Optional. The path to
- the Dockerfile relative to the root of the repo. If set, it will be
- used to build this component. Otherwise, App Platform will attempt to
- build it using buildpacks.
- "environment_slug": "str", # Optional. An
- environment slug describing the type of this app. For a full list,
- please refer to `the product documentation
- `_.
- "envs": [
- {
- "key": "str", # The variable name.
- Required.
- "scope": "RUN_AND_BUILD_TIME", #
- Optional. Default value is "RUN_AND_BUILD_TIME". * RUN_TIME:
- Made available only at run-time * BUILD_TIME: Made available
- only at build-time * RUN_AND_BUILD_TIME: Made available at
- both build and run-time. Known values are: "UNSET",
- "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME".
- "type": "GENERAL", # Optional.
- Default value is "GENERAL". * GENERAL: A plain-text
- environment variable * SECRET: A secret encrypted environment
- variable. Known values are: "GENERAL" and "SECRET".
- "value": "str" # Optional. The
- value. If the type is ``SECRET``"" , the value will be
- encrypted on first submission. On following submissions, the
- encrypted value should be used.
+ "source_dir": "str" # Optional. An
+ optional path to the working directory to use for the build.
+ For Dockerfile builds, this will be used as the build
+ context. Must be relative to the root of the repo.
}
],
- "git": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "repo_clone_url": "str" # Optional. The
- clone URL of the repo. Example:
- ``https://github.com/digitalocean/sample-golang.git``.
- },
- "github": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "deploy_on_push": bool, # Optional. Whether
- to automatically deploy new commits made to the repo.
- "repo": "str" # Optional. The name of the
- repo in the format owner/repo. Example:
- ``digitalocean/sample-golang``.
- },
- "gitlab": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "deploy_on_push": bool, # Optional. Whether
- to automatically deploy new commits made to the repo.
- "repo": "str" # Optional. The name of the
- repo in the format owner/repo. Example:
- ``digitalocean/sample-golang``.
- },
- "image": {
- "deploy_on_push": {
- "enabled": bool # Optional. Whether
- to automatically deploy new images. Can only be used for
- images hosted in DOCR and can only be used with an image tag,
- not a specific digest.
- },
- "digest": "str", # Optional. The image
- digest. Cannot be specified if tag is provided.
- "registry": "str", # Optional. The registry
- name. Must be left empty for the ``DOCR`` registry type.
- "registry_credentials": "str", # Optional.
- The credentials to be able to pull the image. The value will be
- encrypted on first submission. On following submissions, the
- encrypted value should be used. * "$username:$access_token" for
- registries of type ``DOCKER_HUB``. * "$username:$access_token"
- for registries of type ``GHCR``.
- "registry_type": "str", # Optional. *
- DOCKER_HUB: The DockerHub container registry type. * DOCR: The
- DigitalOcean container registry type. * GHCR: The Github
- container registry type. Known values are: "DOCKER_HUB", "DOCR",
- and "GHCR".
- "repository": "str", # Optional. The
- repository name.
- "tag": "latest" # Optional. Default value is
- "latest". The repository tag. Defaults to ``latest`` if not
- provided and no digest is provided. Cannot be specified if digest
- is provided.
+ "ingress": {
+ "rules": [
+ {
+ "component": {
+ "name": "str", # The
+ name of the component to route to. Required.
+                                        "preserve_path_prefix": "str",  # Optional. An
+ optional flag to preserve the path that is forwarded
+ to the backend service. By default, the HTTP request
+ path will be trimmed from the left when forwarded to
+ the component. For example, a component with
+ ``path=/api`` will have requests to ``/api/list``
+ trimmed to ``/list``. If this value is ``true``,
+ the path will remain ``/api/list``. Note: this is not
+ applicable for Functions Components and is mutually
+ exclusive with ``rewrite``.
+                                        "rewrite": "str"  #
+ Optional. An optional field that will rewrite the
+ path of the component to be what is specified here.
+ By default, the HTTP request path will be trimmed
+ from the left when forwarded to the component. For
+ example, a component with ``path=/api`` will have
+ requests to ``/api/list`` trimmed to ``/list``. If
+ you specified the rewrite to be ``/v1/``, requests
+ to ``/api/list`` would be rewritten to ``/v1/list``.
+ Note: this is mutually exclusive with
+ ``preserve_path_prefix``.
+ },
+ "cors": {
+                                        "allow_credentials":
+ bool,  # Optional. Whether browsers should expose the
+ response to the client-side JavaScript code when the
+ request's credentials mode is include. This
+ configures the ``Access-Control-Allow-Credentials``
+ header.
+ "allow_headers": [
+ "str" #
+ Optional. The set of allowed HTTP request
+ headers. This configures the
+ ``Access-Control-Allow-Headers`` header.
+ ],
+ "allow_methods": [
+ "str" #
+ Optional. The set of allowed HTTP methods. This
+ configures the ``Access-Control-Allow-Methods``
+ header.
+ ],
+ "allow_origins": [
+ {
+                                                "exact": "str",  # Optional. Exact string
+ match. Only 1 of ``exact``, ``prefix``,
+ or ``regex`` must be set.
+                                                "prefix": "str",  # Optional. Prefix-based
+ match. Only 1 of ``exact``, ``prefix``,
+ or ``regex`` must be set.
+                                                "regex": "str"  # Optional. RE2 style
+ regex-based match. Only 1 of ``exact``,
+ ``prefix``, or ``regex`` must be set. For
+ more information about RE2 syntax, see:
+ https://github.com/google/re2/wiki/Syntax.
+ }
+ ],
+ "expose_headers": [
+ "str" #
+ Optional. The set of HTTP response headers that
+ browsers are allowed to access. This configures
+ the ``Access-Control-Expose-Headers`` header.
+ ],
+ "max_age": "str" #
+ Optional. An optional duration specifying how long
+ browsers can cache the results of a preflight
+ request. This configures the
+ ``Access-Control-Max-Age`` header.
+ },
+ "match": {
+ "authority": {
+ "exact":
+ "str" # Required.
+ },
+ "path": {
+ "prefix":
+ "str"  # Prefix-based match. For example,
+ ``/api`` will match ``/api``, ``/api/``,
+ and any nested paths such as
+ ``/api/v1/endpoint``. Required.
+ }
+ },
+ "redirect": {
+ "authority": "str",
+ # Optional. The authority/host to redirect to. This
+ can be a hostname or IP address. Note: use ``port``
+ to set the port.
+ "port": 0, #
+ Optional. The port to redirect to.
+ "redirect_code": 0,
+ # Optional. The redirect code to use. Defaults to
+ ``302``. Supported values are 300, 301, 302, 303,
+ 304, 307, 308.
+ "scheme": "str", #
+ Optional. The scheme to redirect to. Supported values
+ are ``http`` or ``https``. Default: ``https``.
+ "uri": "str" #
+ Optional. An optional URI path to redirect to. Note:
+ if this is specified the whole URI of the original
+ request will be overwritten to this value,
+ irrespective of the original request URI being
+ matched.
+ }
+ }
+ ]
},
- "instance_count": 1, # Optional. Default value is 1.
- The amount of instances that this component should be scaled to.
- Default: 1. Must not be set if autoscaling is used.
- "instance_size_slug": {},
- "kind": "UNSPECIFIED", # Optional. Default value is
- "UNSPECIFIED". * UNSPECIFIED: Default job type, will auto-complete to
- POST_DEPLOY kind. * PRE_DEPLOY: Indicates a job that runs before an
- app deployment. * POST_DEPLOY: Indicates a job that runs after an app
- deployment. * FAILED_DEPLOY: Indicates a job that runs after a
- component fails to deploy. Known values are: "UNSPECIFIED",
- "PRE_DEPLOY", "POST_DEPLOY", and "FAILED_DEPLOY".
- "log_destinations": [
+ "jobs": [
{
- "name": "str", # Required.
- "datadog": {
- "api_key": "str", # Datadog
- API key. Required.
- "endpoint": "str" #
- Optional. Datadog HTTP log intake endpoint.
+ "autoscaling": {
+ "max_instance_count": 0, #
+ Optional. The maximum amount of instances for this
+ component. Must be more than min_instance_count.
+ "metrics": {
+ "cpu": {
+ "percent": 80
+ # Optional. Default value is 80. The average
+ target CPU utilization for the component.
+ }
+ },
+ "min_instance_count": 0 #
+ Optional. The minimum amount of instances for this
+ component. Must be less than max_instance_count.
},
- "logtail": {
- "token": "str" # Optional.
- Logtail token.
+ "bitbucket": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "deploy_on_push": bool, #
+ Optional. Whether to automatically deploy new commits
+ made to the repo.
+ "repo": "str" # Optional.
+ The name of the repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
},
- "open_search": {
- "basic_auth": {
- "password": "str", #
- Optional. Password for user defined in User. Is
- required when ``endpoint`` is set. Cannot be set if
- using a DigitalOcean DBaaS OpenSearch cluster.
- "user": "str" #
- Optional. Username to authenticate with. Only
- required when ``endpoint`` is set. Defaults to
- ``doadmin`` when ``cluster_name`` is set.
+ "build_command": "str", # Optional.
+ An optional build command to run while building this
+ component from source.
+ "dockerfile_path": "str", #
+ Optional. The path to the Dockerfile relative to the root of
+ the repo. If set, it will be used to build this component.
+ Otherwise, App Platform will attempt to build it using
+ buildpacks.
+ "environment_slug": "str", #
+ Optional. An environment slug describing the type of this
+ app. For a full list, please refer to `the product
+ documentation
+ <https://www.digitalocean.com/docs/app-platform/>`_.
+ "envs": [
+ {
+ "key": "str", # The
+ variable name. Required.
+ "scope":
+ "RUN_AND_BUILD_TIME", # Optional. Default value is
+ "RUN_AND_BUILD_TIME". * RUN_TIME: Made available only
+ at run-time * BUILD_TIME: Made available only at
+ build-time * RUN_AND_BUILD_TIME: Made available at
+ both build and run-time. Known values are: "UNSET",
+ "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME".
+ "type": "GENERAL", #
+ Optional. Default value is "GENERAL". * GENERAL: A
+ plain-text environment variable * SECRET: A secret
+ encrypted environment variable. Known values are:
+ "GENERAL" and "SECRET".
+                                        "value": "str"  #
+ Optional. The value. If the type is ``SECRET``,
+ the value will be encrypted on first submission. On
+ following submissions, the encrypted value should be
+ used.
+ }
+ ],
+ "git": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "repo_clone_url": "str" #
+ Optional. The clone URL of the repo. Example:
+ ``https://github.com/digitalocean/sample-golang.git``.
+ },
+ "github": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "deploy_on_push": bool, #
+ Optional. Whether to automatically deploy new commits
+ made to the repo.
+ "repo": "str" # Optional.
+ The name of the repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
+ },
+ "gitlab": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "deploy_on_push": bool, #
+ Optional. Whether to automatically deploy new commits
+ made to the repo.
+ "repo": "str" # Optional.
+ The name of the repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
+ },
+ "image": {
+ "deploy_on_push": {
+ "enabled": bool #
+ Optional. Whether to automatically deploy new images.
+ Can only be used for images hosted in DOCR and can
+ only be used with an image tag, not a specific
+ digest.
},
- "cluster_name": "str", #
- Optional. The name of a DigitalOcean DBaaS OpenSearch
- cluster to use as a log forwarding destination. Cannot be
- specified if ``endpoint`` is also specified.
- "endpoint": "str", #
- Optional. OpenSearch API Endpoint. Only HTTPS is
- supported. Format: https://:code:``::code:``.
- Cannot be specified if ``cluster_name`` is also
- specified.
- "index_name": "logs" #
- Optional. Default value is "logs". The index name to use
- for the logs. If not set, the default index name is
- "logs".
+ "digest": "str", # Optional.
+ The image digest. Cannot be specified if tag is provided.
+ "registry": "str", #
+ Optional. The registry name. Must be left empty for the
+ ``DOCR`` registry type.
+ "registry_credentials":
+ "str", # Optional. The credentials to be able to pull
+ the image. The value will be encrypted on first
+ submission. On following submissions, the encrypted value
+ should be used. * "$username:$access_token" for
+ registries of type ``DOCKER_HUB``. *
+ "$username:$access_token" for registries of type
+ ``GHCR``.
+ "registry_type": "str", #
+ Optional. * DOCKER_HUB: The DockerHub container registry
+ type. * DOCR: The DigitalOcean container registry type. *
+ GHCR: The Github container registry type. Known values
+ are: "DOCKER_HUB", "DOCR", and "GHCR".
+ "repository": "str", #
+ Optional. The repository name.
+ "tag": "latest" # Optional.
+ Default value is "latest". The repository tag. Defaults
+ to ``latest`` if not provided and no digest is provided.
+ Cannot be specified if digest is provided.
},
- "papertrail": {
- "endpoint": "str" #
- Papertrail syslog endpoint. Required.
- }
- }
- ],
- "name": "str", # Optional. The name. Must be unique
- across all components within the same app.
- "run_command": "str", # Optional. An optional run
- command to override the component's default.
- "source_dir": "str", # Optional. An optional path to
- the working directory to use for the build. For Dockerfile builds,
- this will be used as the build context. Must be relative to the root
- of the repo.
- "termination": {
- "grace_period_seconds": 0 # Optional. The
- number of seconds to wait between sending a TERM signal to a
- container and issuing a KILL which causes immediate shutdown.
- (Default 120).
- }
- }
- ],
- "maintenance": {
- "archive": bool, # Optional. Indicates whether the app
- should be archived. Setting this to true implies that enabled is set to
- true.
- "enabled": bool, # Optional. Indicates whether maintenance
- mode should be enabled for the app.
- "offline_page_url": "str" # Optional. A custom offline page
- to display when maintenance mode is enabled or the app is archived.
- },
- "region": "str", # Optional. The slug form of the geographical
- origin of the app. Default: ``nearest available``. Known values are: "atl",
- "nyc", "sfo", "tor", "ams", "fra", "lon", "blr", "sgp", and "syd".
- "services": [
- {
- "autoscaling": {
- "max_instance_count": 0, # Optional. The
- maximum amount of instances for this component. Must be more than
- min_instance_count.
- "metrics": {
- "cpu": {
- "percent": 80 # Optional.
- Default value is 80. The average target CPU utilization
- for the component.
- }
- },
- "min_instance_count": 0 # Optional. The
- minimum amount of instances for this component. Must be less than
- max_instance_count.
- },
- "bitbucket": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "deploy_on_push": bool, # Optional. Whether
- to automatically deploy new commits made to the repo.
- "repo": "str" # Optional. The name of the
- repo in the format owner/repo. Example:
- ``digitalocean/sample-golang``.
- },
- "build_command": "str", # Optional. An optional
- build command to run while building this component from source.
- "cors": {
- "allow_credentials": bool, # Optional.
- Whether browsers should expose the response to the client-side
- JavaScript code when the request"u2019s credentials mode is
- include. This configures the ``Access-Control-Allow-Credentials``
- header.
- "allow_headers": [
- "str" # Optional. The set of allowed
- HTTP request headers. This configures the
- ``Access-Control-Allow-Headers`` header.
- ],
- "allow_methods": [
- "str" # Optional. The set of allowed
- HTTP methods. This configures the
- ``Access-Control-Allow-Methods`` header.
- ],
- "allow_origins": [
- {
- "exact": "str", # Optional.
- Exact string match. Only 1 of ``exact``"" , ``prefix``""
- , or ``regex`` must be set.
- "prefix": "str", # Optional.
- Prefix-based match. Only 1 of ``exact``"" , ``prefix``""
- , or ``regex`` must be set.
- "regex": "str" # Optional.
- RE2 style regex-based match. Only 1 of ``exact``"" ,
- ``prefix``"" , or ``regex`` must be set. For more
- information about RE2 syntax, see:
- https://github.com/google/re2/wiki/Syntax.
+ "instance_count": 1, # Optional.
+ Default value is 1. The amount of instances that this
+ component should be scaled to. Default: 1. Must not be set if
+ autoscaling is used.
+ "instance_size_slug": {},
+ "kind": "UNSPECIFIED", # Optional.
+ Default value is "UNSPECIFIED". * UNSPECIFIED: Default job
+ type, will auto-complete to POST_DEPLOY kind. * PRE_DEPLOY:
+ Indicates a job that runs before an app deployment. *
+ POST_DEPLOY: Indicates a job that runs after an app
+ deployment. * FAILED_DEPLOY: Indicates a job that runs after
+ a component fails to deploy. Known values are: "UNSPECIFIED",
+ "PRE_DEPLOY", "POST_DEPLOY", and "FAILED_DEPLOY".
+ "log_destinations": [
+ {
+ "name": "str", #
+ Required.
+ "datadog": {
+ "api_key":
+ "str", # Datadog API key. Required.
+ "endpoint":
+ "str" # Optional. Datadog HTTP log intake
+ endpoint.
+ },
+ "logtail": {
+ "token":
+ "str" # Optional. Logtail token.
+ },
+ "open_search": {
+ "basic_auth":
+ {
+ "password": "str", # Optional. Password for
+ user defined in User. Is required when
+ ``endpoint`` is set. Cannot be set if using a
+ DigitalOcean DBaaS OpenSearch cluster.
+ "user": "str" # Optional. Username to
+ authenticate with. Only required when
+ ``endpoint`` is set. Defaults to ``doadmin``
+ when ``cluster_name`` is set.
+ },
+ "cluster_name": "str", # Optional. The name of a
+ DigitalOcean DBaaS OpenSearch cluster to use as a
+ log forwarding destination. Cannot be specified
+ if ``endpoint`` is also specified.
+                                        "endpoint":
+ "str",  # Optional. OpenSearch API Endpoint. Only
+ HTTPS is supported. Format:
+ ``https://<host>:<port>``. Cannot be
+ specified if ``cluster_name`` is also specified.
+ "index_name":
+ "logs" # Optional. Default value is "logs". The
+ index name to use for the logs. If not set, the
+ default index name is "logs".
+ },
+ "papertrail": {
+ "endpoint":
+ "str" # Papertrail syslog endpoint. Required.
+ }
+ }
+ ],
+ "name": "str", # Optional. The name.
+ Must be unique across all components within the same app.
+ "run_command": "str", # Optional. An
+ optional run command to override the component's default.
+ "source_dir": "str", # Optional. An
+ optional path to the working directory to use for the build.
+ For Dockerfile builds, this will be used as the build
+ context. Must be relative to the root of the repo.
+ "termination": {
+ "grace_period_seconds": 0 #
+ Optional. The number of seconds to wait between sending a
+ TERM signal to a container and issuing a KILL which
+ causes immediate shutdown. (Default 120).
}
- ],
- "expose_headers": [
- "str" # Optional. The set of HTTP
- response headers that browsers are allowed to access. This
- configures the ``Access-Control-Expose-Headers`` header.
- ],
- "max_age": "str" # Optional. An optional
- duration specifying how long browsers can cache the results of a
- preflight request. This configures the ``Access-Control-Max-Age``
- header.
- },
- "dockerfile_path": "str", # Optional. The path to
- the Dockerfile relative to the root of the repo. If set, it will be
- used to build this component. Otherwise, App Platform will attempt to
- build it using buildpacks.
- "environment_slug": "str", # Optional. An
- environment slug describing the type of this app. For a full list,
- please refer to `the product documentation
- `_.
- "envs": [
- {
- "key": "str", # The variable name.
- Required.
- "scope": "RUN_AND_BUILD_TIME", #
- Optional. Default value is "RUN_AND_BUILD_TIME". * RUN_TIME:
- Made available only at run-time * BUILD_TIME: Made available
- only at build-time * RUN_AND_BUILD_TIME: Made available at
- both build and run-time. Known values are: "UNSET",
- "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME".
- "type": "GENERAL", # Optional.
- Default value is "GENERAL". * GENERAL: A plain-text
- environment variable * SECRET: A secret encrypted environment
- variable. Known values are: "GENERAL" and "SECRET".
- "value": "str" # Optional. The
- value. If the type is ``SECRET``"" , the value will be
- encrypted on first submission. On following submissions, the
- encrypted value should be used.
}
],
- "git": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "repo_clone_url": "str" # Optional. The
- clone URL of the repo. Example:
- ``https://github.com/digitalocean/sample-golang.git``.
- },
- "github": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "deploy_on_push": bool, # Optional. Whether
- to automatically deploy new commits made to the repo.
- "repo": "str" # Optional. The name of the
- repo in the format owner/repo. Example:
- ``digitalocean/sample-golang``.
- },
- "gitlab": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "deploy_on_push": bool, # Optional. Whether
- to automatically deploy new commits made to the repo.
- "repo": "str" # Optional. The name of the
- repo in the format owner/repo. Example:
- ``digitalocean/sample-golang``.
- },
- "health_check": {
- "failure_threshold": 0, # Optional. The
- number of failed health checks before considered unhealthy.
- "http_path": "str", # Optional. The route
- path used for the HTTP health check ping. If not set, the HTTP
- health check will be disabled and a TCP health check used
- instead.
- "initial_delay_seconds": 0, # Optional. The
- number of seconds to wait before beginning health checks.
- "period_seconds": 0, # Optional. The number
- of seconds to wait between health checks.
- "port": 0, # Optional. The port on which the
- health check will be performed. If not set, the health check will
- be performed on the component's http_port.
- "success_threshold": 0, # Optional. The
- number of successful health checks before considered healthy.
- "timeout_seconds": 0 # Optional. The number
- of seconds after which the check times out.
- },
- "http_port": 0, # Optional. The internal port on
- which this service's run command will listen. Default: 8080 If there
- is not an environment variable with the name ``PORT``"" , one will be
- automatically added with its value set to the value of this field.
- "image": {
- "deploy_on_push": {
- "enabled": bool # Optional. Whether
- to automatically deploy new images. Can only be used for
- images hosted in DOCR and can only be used with an image tag,
- not a specific digest.
- },
- "digest": "str", # Optional. The image
- digest. Cannot be specified if tag is provided.
- "registry": "str", # Optional. The registry
- name. Must be left empty for the ``DOCR`` registry type.
- "registry_credentials": "str", # Optional.
- The credentials to be able to pull the image. The value will be
- encrypted on first submission. On following submissions, the
- encrypted value should be used. * "$username:$access_token" for
- registries of type ``DOCKER_HUB``. * "$username:$access_token"
- for registries of type ``GHCR``.
- "registry_type": "str", # Optional. *
- DOCKER_HUB: The DockerHub container registry type. * DOCR: The
- DigitalOcean container registry type. * GHCR: The Github
- container registry type. Known values are: "DOCKER_HUB", "DOCR",
- and "GHCR".
- "repository": "str", # Optional. The
- repository name.
- "tag": "latest" # Optional. Default value is
- "latest". The repository tag. Defaults to ``latest`` if not
- provided and no digest is provided. Cannot be specified if digest
- is provided.
- },
- "instance_count": 1, # Optional. Default value is 1.
- The amount of instances that this component should be scaled to.
- Default: 1. Must not be set if autoscaling is used.
- "instance_size_slug": {},
- "internal_ports": [
- 0 # Optional. The ports on which this
- service will listen for internal traffic.
- ],
- "liveness_health_check": {
- "failure_threshold": 0, # Optional. The
- number of failed health checks before considered unhealthy.
- "http_path": "str", # Optional. The route
- path used for the HTTP health check ping. If not set, the HTTP
- health check will be disabled and a TCP health check used
- instead.
- "initial_delay_seconds": 0, # Optional. The
- number of seconds to wait before beginning health checks.
- "period_seconds": 0, # Optional. The number
- of seconds to wait between health checks.
- "port": 0, # Optional. The port on which the
- health check will be performed.
- "success_threshold": 0, # Optional. The
- number of successful health checks before considered healthy.
- "timeout_seconds": 0 # Optional. The number
- of seconds after which the check times out.
+ "maintenance": {
+ "archive": bool, # Optional. Indicates
+ whether the app should be archived. Setting this to true implies
+ that enabled is set to true.
+ "enabled": bool, # Optional. Indicates
+ whether maintenance mode should be enabled for the app.
+ "offline_page_url": "str" # Optional. A
+ custom offline page to display when maintenance mode is enabled
+ or the app is archived.
},
- "log_destinations": [
+ "region": "str", # Optional. The slug form of the
+ geographical origin of the app. Default: ``nearest available``. Known
+ values are: "atl", "nyc", "sfo", "tor", "ams", "fra", "lon", "blr",
+ "sgp", and "syd".
+ "services": [
{
- "name": "str", # Required.
- "datadog": {
- "api_key": "str", # Datadog
- API key. Required.
- "endpoint": "str" #
- Optional. Datadog HTTP log intake endpoint.
+ "autoscaling": {
+ "max_instance_count": 0, #
+ Optional. The maximum amount of instances for this
+ component. Must be more than min_instance_count.
+ "metrics": {
+ "cpu": {
+ "percent": 80
+ # Optional. Default value is 80. The average
+ target CPU utilization for the component.
+ }
+ },
+ "min_instance_count": 0 #
+ Optional. The minimum amount of instances for this
+ component. Must be less than max_instance_count.
},
- "logtail": {
- "token": "str" # Optional.
- Logtail token.
+ "bitbucket": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "deploy_on_push": bool, #
+ Optional. Whether to automatically deploy new commits
+ made to the repo.
+ "repo": "str" # Optional.
+ The name of the repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
},
- "open_search": {
- "basic_auth": {
- "password": "str", #
- Optional. Password for user defined in User. Is
- required when ``endpoint`` is set. Cannot be set if
- using a DigitalOcean DBaaS OpenSearch cluster.
- "user": "str" #
- Optional. Username to authenticate with. Only
- required when ``endpoint`` is set. Defaults to
- ``doadmin`` when ``cluster_name`` is set.
+ "build_command": "str", # Optional.
+ An optional build command to run while building this
+ component from source.
+ "cors": {
+ "allow_credentials": bool, #
+ Optional. Whether browsers should expose the response to
+ the client-side JavaScript code when the request's
+ credentials mode is include. This configures the
+ ``Access-Control-Allow-Credentials`` header.
+ "allow_headers": [
+ "str" # Optional.
+ The set of allowed HTTP request headers. This
+ configures the ``Access-Control-Allow-Headers``
+ header.
+ ],
+ "allow_methods": [
+ "str" # Optional.
+ The set of allowed HTTP methods. This configures the
+ ``Access-Control-Allow-Methods`` header.
+ ],
+ "allow_origins": [
+ {
+ "exact":
+ "str", # Optional. Exact string match. Only 1 of
+ ``exact``"" , ``prefix``"" , or ``regex`` must be
+ set.
+ "prefix":
+ "str", # Optional. Prefix-based match. Only 1 of
+ ``exact``"" , ``prefix``"" , or ``regex`` must be
+ set.
+ "regex":
+ "str" # Optional. RE2 style regex-based match.
+ Only 1 of ``exact``"" , ``prefix``"" , or
+ ``regex`` must be set. For more information about
+ RE2 syntax, see:
+ https://github.com/google/re2/wiki/Syntax.
+ }
+ ],
+ "expose_headers": [
+ "str" # Optional.
+ The set of HTTP response headers that browsers are
+ allowed to access. This configures the
+ ``Access-Control-Expose-Headers`` header.
+ ],
+ "max_age": "str" # Optional.
+ An optional duration specifying how long browsers can
+ cache the results of a preflight request. This configures
+ the ``Access-Control-Max-Age`` header.
+ },
+ "dockerfile_path": "str", #
+ Optional. The path to the Dockerfile relative to the root of
+ the repo. If set, it will be used to build this component.
+ Otherwise, App Platform will attempt to build it using
+ buildpacks.
+ "environment_slug": "str", #
+ Optional. An environment slug describing the type of this
+ app. For a full list, please refer to `the product
+ documentation
+ `_.
+ "envs": [
+ {
+ "key": "str", # The
+ variable name. Required.
+ "scope":
+ "RUN_AND_BUILD_TIME", # Optional. Default value is
+ "RUN_AND_BUILD_TIME". * RUN_TIME: Made available only
+ at run-time * BUILD_TIME: Made available only at
+ build-time * RUN_AND_BUILD_TIME: Made available at
+ both build and run-time. Known values are: "UNSET",
+ "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME".
+ "type": "GENERAL", #
+ Optional. Default value is "GENERAL". * GENERAL: A
+ plain-text environment variable * SECRET: A secret
+ encrypted environment variable. Known values are:
+ "GENERAL" and "SECRET".
+ "value": "str" #
+ Optional. The value. If the type is ``SECRET``"" ,
+ the value will be encrypted on first submission. On
+ following submissions, the encrypted value should be
+ used.
+ }
+ ],
+ "git": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "repo_clone_url": "str" #
+ Optional. The clone URL of the repo. Example:
+ ``https://github.com/digitalocean/sample-golang.git``.
+ },
+ "github": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "deploy_on_push": bool, #
+ Optional. Whether to automatically deploy new commits
+ made to the repo.
+ "repo": "str" # Optional.
+ The name of the repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
+ },
+ "gitlab": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "deploy_on_push": bool, #
+ Optional. Whether to automatically deploy new commits
+ made to the repo.
+ "repo": "str" # Optional.
+ The name of the repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
+ },
+ "health_check": {
+ "failure_threshold": 0, #
+ Optional. The number of failed health checks before
+ considered unhealthy.
+ "http_path": "str", #
+ Optional. The route path used for the HTTP health check
+ ping. If not set, the HTTP health check will be disabled
+ and a TCP health check used instead.
+ "initial_delay_seconds": 0,
+ # Optional. The number of seconds to wait before
+ beginning health checks.
+ "period_seconds": 0, #
+ Optional. The number of seconds to wait between health
+ checks.
+ "port": 0, # Optional. The
+ port on which the health check will be performed. If not
+ set, the health check will be performed on the
+ component's http_port.
+ "success_threshold": 0, #
+ Optional. The number of successful health checks before
+ considered healthy.
+ "timeout_seconds": 0 #
+ Optional. The number of seconds after which the check
+ times out.
+ },
+ "http_port": 0, # Optional. The
+ internal port on which this service's run command will
+ listen. Default: 8080 If there is not an environment variable
+ with the name ``PORT``"" , one will be automatically added
+ with its value set to the value of this field.
+ "image": {
+ "deploy_on_push": {
+ "enabled": bool #
+ Optional. Whether to automatically deploy new images.
+ Can only be used for images hosted in DOCR and can
+ only be used with an image tag, not a specific
+ digest.
},
- "cluster_name": "str", #
- Optional. The name of a DigitalOcean DBaaS OpenSearch
- cluster to use as a log forwarding destination. Cannot be
- specified if ``endpoint`` is also specified.
- "endpoint": "str", #
- Optional. OpenSearch API Endpoint. Only HTTPS is
- supported. Format: https://:code:``::code:``.
- Cannot be specified if ``cluster_name`` is also
- specified.
- "index_name": "logs" #
- Optional. Default value is "logs". The index name to use
- for the logs. If not set, the default index name is
- "logs".
+ "digest": "str", # Optional.
+ The image digest. Cannot be specified if tag is provided.
+ "registry": "str", #
+ Optional. The registry name. Must be left empty for the
+ ``DOCR`` registry type.
+ "registry_credentials":
+ "str", # Optional. The credentials to be able to pull
+ the image. The value will be encrypted on first
+ submission. On following submissions, the encrypted value
+ should be used. * "$username:$access_token" for
+ registries of type ``DOCKER_HUB``. *
+ "$username:$access_token" for registries of type
+ ``GHCR``.
+ "registry_type": "str", #
+ Optional. * DOCKER_HUB: The DockerHub container registry
+ type. * DOCR: The DigitalOcean container registry type. *
+ GHCR: The Github container registry type. Known values
+ are: "DOCKER_HUB", "DOCR", and "GHCR".
+ "repository": "str", #
+ Optional. The repository name.
+ "tag": "latest" # Optional.
+ Default value is "latest". The repository tag. Defaults
+ to ``latest`` if not provided and no digest is provided.
+ Cannot be specified if digest is provided.
},
- "papertrail": {
- "endpoint": "str" #
- Papertrail syslog endpoint. Required.
+ "instance_count": 1, # Optional.
+ Default value is 1. The amount of instances that this
+ component should be scaled to. Default: 1. Must not be set if
+ autoscaling is used.
+ "instance_size_slug": {},
+ "internal_ports": [
+ 0 # Optional. The ports on
+ which this service will listen for internal traffic.
+ ],
+ "liveness_health_check": {
+ "failure_threshold": 0, #
+ Optional. The number of failed health checks before
+ considered unhealthy.
+ "http_path": "str", #
+ Optional. The route path used for the HTTP health check
+ ping. If not set, the HTTP health check will be disabled
+ and a TCP health check used instead.
+ "initial_delay_seconds": 0,
+ # Optional. The number of seconds to wait before
+ beginning health checks.
+ "period_seconds": 0, #
+ Optional. The number of seconds to wait between health
+ checks.
+ "port": 0, # Optional. The
+ port on which the health check will be performed.
+ "success_threshold": 0, #
+ Optional. The number of successful health checks before
+ considered healthy.
+ "timeout_seconds": 0 #
+ Optional. The number of seconds after which the check
+ times out.
+ },
+ "log_destinations": [
+ {
+ "name": "str", #
+ Required.
+ "datadog": {
+ "api_key":
+ "str", # Datadog API key. Required.
+ "endpoint":
+ "str" # Optional. Datadog HTTP log intake
+ endpoint.
+ },
+ "logtail": {
+ "token":
+ "str" # Optional. Logtail token.
+ },
+ "open_search": {
+ "basic_auth":
+ {
+ "password": "str", # Optional. Password for
+ user defined in User. Is required when
+ ``endpoint`` is set. Cannot be set if using a
+ DigitalOcean DBaaS OpenSearch cluster.
+ "user": "str" # Optional. Username to
+ authenticate with. Only required when
+ ``endpoint`` is set. Defaults to ``doadmin``
+ when ``cluster_name`` is set.
+ },
+ "cluster_name": "str", # Optional. The name of a
+ DigitalOcean DBaaS OpenSearch cluster to use as a
+ log forwarding destination. Cannot be specified
+ if ``endpoint`` is also specified.
+ "endpoint":
+ "str", # Optional. OpenSearch API Endpoint. Only
+ HTTPS is supported. Format:
+ https://:code:``::code:``. Cannot be
+ specified if ``cluster_name`` is also specified.
+ "index_name":
+ "logs" # Optional. Default value is "logs". The
+ index name to use for the logs. If not set, the
+ default index name is "logs".
+ },
+ "papertrail": {
+ "endpoint":
+ "str" # Papertrail syslog endpoint. Required.
+ }
+ }
+ ],
+ "name": "str", # Optional. The name.
+ Must be unique across all components within the same app.
+ "protocol": "str", # Optional. The
+ protocol which the service uses to serve traffic on the
+ http_port. * ``HTTP``"" : The app is serving the HTTP
+ protocol. Default. * ``HTTP2``"" : The app is serving the
+ HTTP/2 protocol. Currently, this needs to be implemented in
+ the service by serving HTTP/2 cleartext (h2c). Known values
+ are: "HTTP" and "HTTP2".
+ "routes": [
+ {
+ "path": "str", #
+ Optional. (Deprecated - Use Ingress Rules instead).
+ An HTTP path prefix. Paths must start with / and must
+ be unique across all components within an app.
+ "preserve_path_prefix": bool # Optional. An optional
+ flag to preserve the path that is forwarded to the
+ backend service. By default, the HTTP request path
+ will be trimmed from the left when forwarded to the
+ component. For example, a component with
+ ``path=/api`` will have requests to ``/api/list``
+ trimmed to ``/list``. If this value is ``true``"" ,
+ the path will remain ``/api/list``.
+ }
+ ],
+ "run_command": "str", # Optional. An
+ optional run command to override the component's default.
+ "source_dir": "str", # Optional. An
+ optional path to the working directory to use for the build.
+ For Dockerfile builds, this will be used as the build
+ context. Must be relative to the root of the repo.
+ "termination": {
+ "drain_seconds": 0, #
+ Optional. The number of seconds to wait between selecting
+ a container instance for termination and issuing the TERM
+ signal. Selecting a container instance for termination
+ begins an asynchronous drain of new requests on upstream
+ load-balancers. (Default 15).
+ "grace_period_seconds": 0 #
+ Optional. The number of seconds to wait between sending a
+ TERM signal to a container and issuing a KILL which
+ causes immediate shutdown. (Default 120).
}
}
],
- "name": "str", # Optional. The name. Must be unique
- across all components within the same app.
- "protocol": "str", # Optional. The protocol which
- the service uses to serve traffic on the http_port. * ``HTTP``"" :
- The app is serving the HTTP protocol. Default. * ``HTTP2``"" : The
- app is serving the HTTP/2 protocol. Currently, this needs to be
- implemented in the service by serving HTTP/2 cleartext (h2c). Known
- values are: "HTTP" and "HTTP2".
- "routes": [
+ "static_sites": [
{
- "path": "str", # Optional.
- (Deprecated - Use Ingress Rules instead). An HTTP path
- prefix. Paths must start with / and must be unique across all
- components within an app.
- "preserve_path_prefix": bool #
- Optional. An optional flag to preserve the path that is
- forwarded to the backend service. By default, the HTTP
- request path will be trimmed from the left when forwarded to
- the component. For example, a component with ``path=/api``
- will have requests to ``/api/list`` trimmed to ``/list``. If
- this value is ``true``"" , the path will remain
- ``/api/list``.
+ "bitbucket": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "deploy_on_push": bool, #
+ Optional. Whether to automatically deploy new commits
+ made to the repo.
+ "repo": "str" # Optional.
+ The name of the repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
+ },
+ "build_command": "str", # Optional.
+ An optional build command to run while building this
+ component from source.
+ "catchall_document": "str", #
+ Optional. The name of the document to use as the fallback for
+ any requests to documents that are not found when serving
+ this static site. Only 1 of ``catchall_document`` or
+ ``error_document`` can be set.
+ "cors": {
+ "allow_credentials": bool, #
+ Optional. Whether browsers should expose the response to
+ the client-side JavaScript code when the request's
+ credentials mode is include. This configures the
+ ``Access-Control-Allow-Credentials`` header.
+ "allow_headers": [
+ "str" # Optional.
+ The set of allowed HTTP request headers. This
+ configures the ``Access-Control-Allow-Headers``
+ header.
+ ],
+ "allow_methods": [
+ "str" # Optional.
+ The set of allowed HTTP methods. This configures the
+ ``Access-Control-Allow-Methods`` header.
+ ],
+ "allow_origins": [
+ {
+ "exact":
+ "str", # Optional. Exact string match. Only 1 of
+ ``exact``"" , ``prefix``"" , or ``regex`` must be
+ set.
+ "prefix":
+ "str", # Optional. Prefix-based match. Only 1 of
+ ``exact``"" , ``prefix``"" , or ``regex`` must be
+ set.
+ "regex":
+ "str" # Optional. RE2 style regex-based match.
+ Only 1 of ``exact``"" , ``prefix``"" , or
+ ``regex`` must be set. For more information about
+ RE2 syntax, see:
+ https://github.com/google/re2/wiki/Syntax.
+ }
+ ],
+ "expose_headers": [
+ "str" # Optional.
+ The set of HTTP response headers that browsers are
+ allowed to access. This configures the
+ ``Access-Control-Expose-Headers`` header.
+ ],
+ "max_age": "str" # Optional.
+ An optional duration specifying how long browsers can
+ cache the results of a preflight request. This configures
+ the ``Access-Control-Max-Age`` header.
+ },
+ "dockerfile_path": "str", #
+ Optional. The path to the Dockerfile relative to the root of
+ the repo. If set, it will be used to build this component.
+ Otherwise, App Platform will attempt to build it using
+ buildpacks.
+ "environment_slug": "str", #
+ Optional. An environment slug describing the type of this
+ app. For a full list, please refer to `the product
+ documentation
+ `_.
+ "envs": [
+ {
+ "key": "str", # The
+ variable name. Required.
+ "scope":
+ "RUN_AND_BUILD_TIME", # Optional. Default value is
+ "RUN_AND_BUILD_TIME". * RUN_TIME: Made available only
+ at run-time * BUILD_TIME: Made available only at
+ build-time * RUN_AND_BUILD_TIME: Made available at
+ both build and run-time. Known values are: "UNSET",
+ "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME".
+ "type": "GENERAL", #
+ Optional. Default value is "GENERAL". * GENERAL: A
+ plain-text environment variable * SECRET: A secret
+ encrypted environment variable. Known values are:
+ "GENERAL" and "SECRET".
+ "value": "str" #
+ Optional. The value. If the type is ``SECRET``"" ,
+ the value will be encrypted on first submission. On
+ following submissions, the encrypted value should be
+ used.
+ }
+ ],
+ "error_document": "404.html", #
+ Optional. Default value is "404.html". The name of the error
+ document to use when serving this static site. Default:
+ 404.html. If no such file exists within the built assets, App
+ Platform will supply one.
+ "git": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "repo_clone_url": "str" #
+ Optional. The clone URL of the repo. Example:
+ ``https://github.com/digitalocean/sample-golang.git``.
+ },
+ "github": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "deploy_on_push": bool, #
+ Optional. Whether to automatically deploy new commits
+ made to the repo.
+ "repo": "str" # Optional.
+ The name of the repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
+ },
+ "gitlab": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "deploy_on_push": bool, #
+ Optional. Whether to automatically deploy new commits
+ made to the repo.
+ "repo": "str" # Optional.
+ The name of the repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
+ },
+ "image": {
+ "deploy_on_push": {
+ "enabled": bool #
+ Optional. Whether to automatically deploy new images.
+ Can only be used for images hosted in DOCR and can
+ only be used with an image tag, not a specific
+ digest.
+ },
+ "digest": "str", # Optional.
+ The image digest. Cannot be specified if tag is provided.
+ "registry": "str", #
+ Optional. The registry name. Must be left empty for the
+ ``DOCR`` registry type.
+ "registry_credentials":
+ "str", # Optional. The credentials to be able to pull
+ the image. The value will be encrypted on first
+ submission. On following submissions, the encrypted value
+ should be used. * "$username:$access_token" for
+ registries of type ``DOCKER_HUB``. *
+ "$username:$access_token" for registries of type
+ ``GHCR``.
+ "registry_type": "str", #
+ Optional. * DOCKER_HUB: The DockerHub container registry
+ type. * DOCR: The DigitalOcean container registry type. *
+ GHCR: The Github container registry type. Known values
+ are: "DOCKER_HUB", "DOCR", and "GHCR".
+ "repository": "str", #
+ Optional. The repository name.
+ "tag": "latest" # Optional.
+ Default value is "latest". The repository tag. Defaults
+ to ``latest`` if not provided and no digest is provided.
+ Cannot be specified if digest is provided.
+ },
+ "index_document": "index.html", #
+ Optional. Default value is "index.html". The name of the
+ index document to use when serving this static site. Default:
+ index.html.
+ "log_destinations": [
+ {
+ "name": "str", #
+ Required.
+ "datadog": {
+ "api_key":
+ "str", # Datadog API key. Required.
+ "endpoint":
+ "str" # Optional. Datadog HTTP log intake
+ endpoint.
+ },
+ "logtail": {
+ "token":
+ "str" # Optional. Logtail token.
+ },
+ "open_search": {
+ "basic_auth":
+ {
+ "password": "str", # Optional. Password for
+ user defined in User. Is required when
+ ``endpoint`` is set. Cannot be set if using a
+ DigitalOcean DBaaS OpenSearch cluster.
+ "user": "str" # Optional. Username to
+ authenticate with. Only required when
+ ``endpoint`` is set. Defaults to ``doadmin``
+ when ``cluster_name`` is set.
+ },
+ "cluster_name": "str", # Optional. The name of a
+ DigitalOcean DBaaS OpenSearch cluster to use as a
+ log forwarding destination. Cannot be specified
+ if ``endpoint`` is also specified.
+ "endpoint":
+ "str", # Optional. OpenSearch API Endpoint. Only
+ HTTPS is supported. Format:
+ https://:code:``::code:``. Cannot be
+ specified if ``cluster_name`` is also specified.
+ "index_name":
+ "logs" # Optional. Default value is "logs". The
+ index name to use for the logs. If not set, the
+ default index name is "logs".
+ },
+ "papertrail": {
+ "endpoint":
+ "str" # Papertrail syslog endpoint. Required.
+ }
+ }
+ ],
+ "name": "str", # Optional. The name.
+ Must be unique across all components within the same app.
+ "output_dir": "str", # Optional. An
+ optional path to where the built assets will be located,
+ relative to the build context. If not set, App Platform will
+ automatically scan for these directory names: ``_static``"" ,
+ ``dist``"" , ``public``"" , ``build``.
+ "routes": [
+ {
+ "path": "str", #
+ Optional. (Deprecated - Use Ingress Rules instead).
+ An HTTP path prefix. Paths must start with / and must
+ be unique across all components within an app.
+ "preserve_path_prefix": bool # Optional. An optional
+ flag to preserve the path that is forwarded to the
+ backend service. By default, the HTTP request path
+ will be trimmed from the left when forwarded to the
+ component. For example, a component with
+ ``path=/api`` will have requests to ``/api/list``
+ trimmed to ``/list``. If this value is ``true``"" ,
+ the path will remain ``/api/list``.
+ }
+ ],
+ "run_command": "str", # Optional. An
+ optional run command to override the component's default.
+ "source_dir": "str" # Optional. An
+ optional path to the working directory to use for the build.
+ For Dockerfile builds, this will be used as the build
+ context. Must be relative to the root of the repo.
}
],
- "run_command": "str", # Optional. An optional run
- command to override the component's default.
- "source_dir": "str", # Optional. An optional path to
- the working directory to use for the build. For Dockerfile builds,
- this will be used as the build context. Must be relative to the root
- of the repo.
- "termination": {
- "drain_seconds": 0, # Optional. The number
- of seconds to wait between selecting a container instance for
- termination and issuing the TERM signal. Selecting a container
- instance for termination begins an asynchronous drain of new
- requests on upstream load-balancers. (Default 15).
- "grace_period_seconds": 0 # Optional. The
- number of seconds to wait between sending a TERM signal to a
- container and issuing a KILL which causes immediate shutdown.
- (Default 120).
- }
- }
- ],
- "static_sites": [
- {
- "bitbucket": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "deploy_on_push": bool, # Optional. Whether
- to automatically deploy new commits made to the repo.
- "repo": "str" # Optional. The name of the
- repo in the format owner/repo. Example:
- ``digitalocean/sample-golang``.
- },
- "build_command": "str", # Optional. An optional
- build command to run while building this component from source.
- "catchall_document": "str", # Optional. The name of
- the document to use as the fallback for any requests to documents
- that are not found when serving this static site. Only 1 of
- ``catchall_document`` or ``error_document`` can be set.
- "cors": {
- "allow_credentials": bool, # Optional.
- Whether browsers should expose the response to the client-side
- JavaScript code when the request"u2019s credentials mode is
- include. This configures the ``Access-Control-Allow-Credentials``
- header.
- "allow_headers": [
- "str" # Optional. The set of allowed
- HTTP request headers. This configures the
- ``Access-Control-Allow-Headers`` header.
- ],
- "allow_methods": [
- "str" # Optional. The set of allowed
- HTTP methods. This configures the
- ``Access-Control-Allow-Methods`` header.
- ],
- "allow_origins": [
+ "vpc": {
+ "egress_ips": [
{
- "exact": "str", # Optional.
- Exact string match. Only 1 of ``exact``"" , ``prefix``""
- , or ``regex`` must be set.
- "prefix": "str", # Optional.
- Prefix-based match. Only 1 of ``exact``"" , ``prefix``""
- , or ``regex`` must be set.
- "regex": "str" # Optional.
- RE2 style regex-based match. Only 1 of ``exact``"" ,
- ``prefix``"" , or ``regex`` must be set. For more
- information about RE2 syntax, see:
- https://github.com/google/re2/wiki/Syntax.
+ "ip": "str" # Optional. The
+ egress ips associated with the VPC.
}
],
- "expose_headers": [
- "str" # Optional. The set of HTTP
- response headers that browsers are allowed to access. This
- configures the ``Access-Control-Expose-Headers`` header.
- ],
- "max_age": "str" # Optional. An optional
- duration specifying how long browsers can cache the results of a
- preflight request. This configures the ``Access-Control-Max-Age``
- header.
- },
- "dockerfile_path": "str", # Optional. The path to
- the Dockerfile relative to the root of the repo. If set, it will be
- used to build this component. Otherwise, App Platform will attempt to
- build it using buildpacks.
- "environment_slug": "str", # Optional. An
- environment slug describing the type of this app. For a full list,
- please refer to `the product documentation
- `_.
- "envs": [
- {
- "key": "str", # The variable name.
- Required.
- "scope": "RUN_AND_BUILD_TIME", #
- Optional. Default value is "RUN_AND_BUILD_TIME". * RUN_TIME:
- Made available only at run-time * BUILD_TIME: Made available
- only at build-time * RUN_AND_BUILD_TIME: Made available at
- both build and run-time. Known values are: "UNSET",
- "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME".
- "type": "GENERAL", # Optional.
- Default value is "GENERAL". * GENERAL: A plain-text
- environment variable * SECRET: A secret encrypted environment
- variable. Known values are: "GENERAL" and "SECRET".
- "value": "str" # Optional. The
- value. If the type is ``SECRET``"" , the value will be
- encrypted on first submission. On following submissions, the
- encrypted value should be used.
- }
- ],
- "error_document": "404.html", # Optional. Default
- value is "404.html". The name of the error document to use when
- serving this static site. Default: 404.html. If no such file exists
- within the built assets, App Platform will supply one.
- "git": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "repo_clone_url": "str" # Optional. The
- clone URL of the repo. Example:
- ``https://github.com/digitalocean/sample-golang.git``.
- },
- "github": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "deploy_on_push": bool, # Optional. Whether
- to automatically deploy new commits made to the repo.
- "repo": "str" # Optional. The name of the
- repo in the format owner/repo. Example:
- ``digitalocean/sample-golang``.
- },
- "gitlab": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "deploy_on_push": bool, # Optional. Whether
- to automatically deploy new commits made to the repo.
- "repo": "str" # Optional. The name of the
- repo in the format owner/repo. Example:
- ``digitalocean/sample-golang``.
- },
- "image": {
- "deploy_on_push": {
- "enabled": bool # Optional. Whether
- to automatically deploy new images. Can only be used for
- images hosted in DOCR and can only be used with an image tag,
- not a specific digest.
- },
- "digest": "str", # Optional. The image
- digest. Cannot be specified if tag is provided.
- "registry": "str", # Optional. The registry
- name. Must be left empty for the ``DOCR`` registry type.
- "registry_credentials": "str", # Optional.
- The credentials to be able to pull the image. The value will be
- encrypted on first submission. On following submissions, the
- encrypted value should be used. * "$username:$access_token" for
- registries of type ``DOCKER_HUB``. * "$username:$access_token"
- for registries of type ``GHCR``.
- "registry_type": "str", # Optional. *
- DOCKER_HUB: The DockerHub container registry type. * DOCR: The
- DigitalOcean container registry type. * GHCR: The Github
- container registry type. Known values are: "DOCKER_HUB", "DOCR",
- and "GHCR".
- "repository": "str", # Optional. The
- repository name.
- "tag": "latest" # Optional. Default value is
- "latest". The repository tag. Defaults to ``latest`` if not
- provided and no digest is provided. Cannot be specified if digest
- is provided.
+ "id": "str" # Optional. The ID of the VPC.
},
- "index_document": "index.html", # Optional. Default
- value is "index.html". The name of the index document to use when
- serving this static site. Default: index.html.
- "log_destinations": [
+ "workers": [
{
- "name": "str", # Required.
- "datadog": {
- "api_key": "str", # Datadog
- API key. Required.
- "endpoint": "str" #
- Optional. Datadog HTTP log intake endpoint.
+ "autoscaling": {
+ "max_instance_count": 0, #
+ Optional. The maximum amount of instances for this
+ component. Must be more than min_instance_count.
+ "metrics": {
+ "cpu": {
+ "percent": 80
+ # Optional. Default value is 80. The average
+ target CPU utilization for the component.
+ }
+ },
+ "min_instance_count": 0 #
+ Optional. The minimum amount of instances for this
+ component. Must be less than max_instance_count.
},
- "logtail": {
- "token": "str" # Optional.
- Logtail token.
+ "bitbucket": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "deploy_on_push": bool, #
+ Optional. Whether to automatically deploy new commits
+ made to the repo.
+ "repo": "str" # Optional.
+ The name of the repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
},
- "open_search": {
- "basic_auth": {
- "password": "str", #
- Optional. Password for user defined in User. Is
- required when ``endpoint`` is set. Cannot be set if
- using a DigitalOcean DBaaS OpenSearch cluster.
- "user": "str" #
- Optional. Username to authenticate with. Only
- required when ``endpoint`` is set. Defaults to
- ``doadmin`` when ``cluster_name`` is set.
- },
- "cluster_name": "str", #
- Optional. The name of a DigitalOcean DBaaS OpenSearch
- cluster to use as a log forwarding destination. Cannot be
- specified if ``endpoint`` is also specified.
- "endpoint": "str", #
- Optional. OpenSearch API Endpoint. Only HTTPS is
- supported. Format: https://:code:``::code:``.
- Cannot be specified if ``cluster_name`` is also
- specified.
- "index_name": "logs" #
- Optional. Default value is "logs". The index name to use
- for the logs. If not set, the default index name is
- "logs".
+ "build_command": "str", # Optional.
+ An optional build command to run while building this
+ component from source.
+ "dockerfile_path": "str", #
+ Optional. The path to the Dockerfile relative to the root of
+ the repo. If set, it will be used to build this component.
+ Otherwise, App Platform will attempt to build it using
+ buildpacks.
+ "environment_slug": "str", #
+ Optional. An environment slug describing the type of this
+ app. For a full list, please refer to `the product
+ documentation
+ `_.
+ "envs": [
+ {
+ "key": "str", # The
+ variable name. Required.
+ "scope":
+ "RUN_AND_BUILD_TIME", # Optional. Default value is
+ "RUN_AND_BUILD_TIME". * RUN_TIME: Made available only
+ at run-time * BUILD_TIME: Made available only at
+ build-time * RUN_AND_BUILD_TIME: Made available at
+ both build and run-time. Known values are: "UNSET",
+ "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME".
+ "type": "GENERAL", #
+ Optional. Default value is "GENERAL". * GENERAL: A
+ plain-text environment variable * SECRET: A secret
+ encrypted environment variable. Known values are:
+ "GENERAL" and "SECRET".
+ "value": "str" #
+ Optional. The value. If the type is ``SECRET``"" ,
+ the value will be encrypted on first submission. On
+ following submissions, the encrypted value should be
+ used.
+ }
+ ],
+ "git": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "repo_clone_url": "str" #
+ Optional. The clone URL of the repo. Example:
+ ``https://github.com/digitalocean/sample-golang.git``.
},
- "papertrail": {
- "endpoint": "str" #
- Papertrail syslog endpoint. Required.
- }
- }
- ],
- "name": "str", # Optional. The name. Must be unique
- across all components within the same app.
- "output_dir": "str", # Optional. An optional path to
- where the built assets will be located, relative to the build
- context. If not set, App Platform will automatically scan for these
- directory names: ``_static``"" , ``dist``"" , ``public``"" ,
- ``build``.
- "routes": [
- {
- "path": "str", # Optional.
- (Deprecated - Use Ingress Rules instead). An HTTP path
- prefix. Paths must start with / and must be unique across all
- components within an app.
- "preserve_path_prefix": bool #
- Optional. An optional flag to preserve the path that is
- forwarded to the backend service. By default, the HTTP
- request path will be trimmed from the left when forwarded to
- the component. For example, a component with ``path=/api``
- will have requests to ``/api/list`` trimmed to ``/list``. If
- this value is ``true``"" , the path will remain
- ``/api/list``.
- }
- ],
- "run_command": "str", # Optional. An optional run
- command to override the component's default.
- "source_dir": "str" # Optional. An optional path to
- the working directory to use for the build. For Dockerfile builds,
- this will be used as the build context. Must be relative to the root
- of the repo.
- }
- ],
- "vpc": {
- "egress_ips": [
- {
- "ip": "str" # Optional. The egress ips
- associated with the VPC.
- }
- ],
- "id": "str" # Optional. The ID of the VPC.
- },
- "workers": [
- {
- "autoscaling": {
- "max_instance_count": 0, # Optional. The
- maximum amount of instances for this component. Must be more than
- min_instance_count.
- "metrics": {
- "cpu": {
- "percent": 80 # Optional.
- Default value is 80. The average target CPU utilization
- for the component.
- }
- },
- "min_instance_count": 0 # Optional. The
- minimum amount of instances for this component. Must be less than
- max_instance_count.
- },
- "bitbucket": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "deploy_on_push": bool, # Optional. Whether
- to automatically deploy new commits made to the repo.
- "repo": "str" # Optional. The name of the
- repo in the format owner/repo. Example:
- ``digitalocean/sample-golang``.
- },
- "build_command": "str", # Optional. An optional
- build command to run while building this component from source.
- "dockerfile_path": "str", # Optional. The path to
- the Dockerfile relative to the root of the repo. If set, it will be
- used to build this component. Otherwise, App Platform will attempt to
- build it using buildpacks.
- "environment_slug": "str", # Optional. An
- environment slug describing the type of this app. For a full list,
- please refer to `the product documentation
- `_.
- "envs": [
- {
- "key": "str", # The variable name.
- Required.
- "scope": "RUN_AND_BUILD_TIME", #
- Optional. Default value is "RUN_AND_BUILD_TIME". * RUN_TIME:
- Made available only at run-time * BUILD_TIME: Made available
- only at build-time * RUN_AND_BUILD_TIME: Made available at
- both build and run-time. Known values are: "UNSET",
- "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME".
- "type": "GENERAL", # Optional.
- Default value is "GENERAL". * GENERAL: A plain-text
- environment variable * SECRET: A secret encrypted environment
- variable. Known values are: "GENERAL" and "SECRET".
- "value": "str" # Optional. The
- value. If the type is ``SECRET``"" , the value will be
- encrypted on first submission. On following submissions, the
- encrypted value should be used.
- }
- ],
- "git": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "repo_clone_url": "str" # Optional. The
- clone URL of the repo. Example:
- ``https://github.com/digitalocean/sample-golang.git``.
- },
- "github": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "deploy_on_push": bool, # Optional. Whether
- to automatically deploy new commits made to the repo.
- "repo": "str" # Optional. The name of the
- repo in the format owner/repo. Example:
- ``digitalocean/sample-golang``.
- },
- "gitlab": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "deploy_on_push": bool, # Optional. Whether
- to automatically deploy new commits made to the repo.
- "repo": "str" # Optional. The name of the
- repo in the format owner/repo. Example:
- ``digitalocean/sample-golang``.
- },
- "image": {
- "deploy_on_push": {
- "enabled": bool # Optional. Whether
- to automatically deploy new images. Can only be used for
- images hosted in DOCR and can only be used with an image tag,
- not a specific digest.
- },
- "digest": "str", # Optional. The image
- digest. Cannot be specified if tag is provided.
- "registry": "str", # Optional. The registry
- name. Must be left empty for the ``DOCR`` registry type.
- "registry_credentials": "str", # Optional.
- The credentials to be able to pull the image. The value will be
- encrypted on first submission. On following submissions, the
- encrypted value should be used. * "$username:$access_token" for
- registries of type ``DOCKER_HUB``. * "$username:$access_token"
- for registries of type ``GHCR``.
- "registry_type": "str", # Optional. *
- DOCKER_HUB: The DockerHub container registry type. * DOCR: The
- DigitalOcean container registry type. * GHCR: The Github
- container registry type. Known values are: "DOCKER_HUB", "DOCR",
- and "GHCR".
- "repository": "str", # Optional. The
- repository name.
- "tag": "latest" # Optional. Default value is
- "latest". The repository tag. Defaults to ``latest`` if not
- provided and no digest is provided. Cannot be specified if digest
- is provided.
- },
- "instance_count": 1, # Optional. Default value is 1.
- The amount of instances that this component should be scaled to.
- Default: 1. Must not be set if autoscaling is used.
- "instance_size_slug": {},
- "liveness_health_check": {
- "failure_threshold": 0, # Optional. The
- number of failed health checks before considered unhealthy.
- "http_path": "str", # Optional. The route
- path used for the HTTP health check ping. If not set, the HTTP
- health check will be disabled and a TCP health check used
- instead.
- "initial_delay_seconds": 0, # Optional. The
- number of seconds to wait before beginning health checks.
- "period_seconds": 0, # Optional. The number
- of seconds to wait between health checks.
- "port": 0, # Optional. The port on which the
- health check will be performed.
- "success_threshold": 0, # Optional. The
- number of successful health checks before considered healthy.
- "timeout_seconds": 0 # Optional. The number
- of seconds after which the check times out.
- },
- "log_destinations": [
- {
- "name": "str", # Required.
- "datadog": {
- "api_key": "str", # Datadog
- API key. Required.
- "endpoint": "str" #
- Optional. Datadog HTTP log intake endpoint.
+ "github": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "deploy_on_push": bool, #
+ Optional. Whether to automatically deploy new commits
+ made to the repo.
+ "repo": "str" # Optional.
+ The name of the repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
},
- "logtail": {
- "token": "str" # Optional.
- Logtail token.
+ "gitlab": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "deploy_on_push": bool, #
+ Optional. Whether to automatically deploy new commits
+ made to the repo.
+ "repo": "str" # Optional.
+ The name of the repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
},
- "open_search": {
- "basic_auth": {
- "password": "str", #
- Optional. Password for user defined in User. Is
- required when ``endpoint`` is set. Cannot be set if
- using a DigitalOcean DBaaS OpenSearch cluster.
- "user": "str" #
- Optional. Username to authenticate with. Only
- required when ``endpoint`` is set. Defaults to
- ``doadmin`` when ``cluster_name`` is set.
+ "image": {
+ "deploy_on_push": {
+ "enabled": bool #
+ Optional. Whether to automatically deploy new images.
+ Can only be used for images hosted in DOCR and can
+ only be used with an image tag, not a specific
+ digest.
},
- "cluster_name": "str", #
- Optional. The name of a DigitalOcean DBaaS OpenSearch
- cluster to use as a log forwarding destination. Cannot be
- specified if ``endpoint`` is also specified.
- "endpoint": "str", #
- Optional. OpenSearch API Endpoint. Only HTTPS is
- supported. Format: https://:code:``::code:``.
- Cannot be specified if ``cluster_name`` is also
- specified.
- "index_name": "logs" #
- Optional. Default value is "logs". The index name to use
- for the logs. If not set, the default index name is
- "logs".
+ "digest": "str", # Optional.
+ The image digest. Cannot be specified if tag is provided.
+ "registry": "str", #
+ Optional. The registry name. Must be left empty for the
+ ``DOCR`` registry type.
+ "registry_credentials":
+ "str", # Optional. The credentials to be able to pull
+ the image. The value will be encrypted on first
+ submission. On following submissions, the encrypted value
+ should be used. * "$username:$access_token" for
+ registries of type ``DOCKER_HUB``. *
+ "$username:$access_token" for registries of type
+ ``GHCR``.
+ "registry_type": "str", #
+ Optional. * DOCKER_HUB: The DockerHub container registry
+ type. * DOCR: The DigitalOcean container registry type. *
+ GHCR: The Github container registry type. Known values
+ are: "DOCKER_HUB", "DOCR", and "GHCR".
+ "repository": "str", #
+ Optional. The repository name.
+ "tag": "latest" # Optional.
+ Default value is "latest". The repository tag. Defaults
+ to ``latest`` if not provided and no digest is provided.
+ Cannot be specified if digest is provided.
},
- "papertrail": {
- "endpoint": "str" #
- Papertrail syslog endpoint. Required.
+ "instance_count": 1, # Optional.
+ Default value is 1. The amount of instances that this
+ component should be scaled to. Default: 1. Must not be set if
+ autoscaling is used.
+ "instance_size_slug": {},
+ "liveness_health_check": {
+ "failure_threshold": 0, #
+ Optional. The number of failed health checks before
+ considered unhealthy.
+ "http_path": "str", #
+ Optional. The route path used for the HTTP health check
+ ping. If not set, the HTTP health check will be disabled
+ and a TCP health check used instead.
+ "initial_delay_seconds": 0,
+ # Optional. The number of seconds to wait before
+ beginning health checks.
+ "period_seconds": 0, #
+ Optional. The number of seconds to wait between health
+ checks.
+ "port": 0, # Optional. The
+ port on which the health check will be performed.
+ "success_threshold": 0, #
+ Optional. The number of successful health checks before
+ considered healthy.
+ "timeout_seconds": 0 #
+ Optional. The number of seconds after which the check
+ times out.
+ },
+ "log_destinations": [
+ {
+ "name": "str", #
+ Required.
+ "datadog": {
+ "api_key":
+ "str", # Datadog API key. Required.
+ "endpoint":
+ "str" # Optional. Datadog HTTP log intake
+ endpoint.
+ },
+ "logtail": {
+ "token":
+ "str" # Optional. Logtail token.
+ },
+ "open_search": {
+ "basic_auth":
+ {
+ "password": "str", # Optional. Password for
+ user defined in User. Is required when
+ ``endpoint`` is set. Cannot be set if using a
+ DigitalOcean DBaaS OpenSearch cluster.
+ "user": "str" # Optional. Username to
+ authenticate with. Only required when
+ ``endpoint`` is set. Defaults to ``doadmin``
+ when ``cluster_name`` is set.
+ },
+ "cluster_name": "str", # Optional. The name of a
+ DigitalOcean DBaaS OpenSearch cluster to use as a
+ log forwarding destination. Cannot be specified
+ if ``endpoint`` is also specified.
+ "endpoint":
+ "str", # Optional. OpenSearch API Endpoint. Only
+ HTTPS is supported. Format:
+ https://:code:``::code:``. Cannot be
+ specified if ``cluster_name`` is also specified.
+ "index_name":
+ "logs" # Optional. Default value is "logs". The
+ index name to use for the logs. If not set, the
+ default index name is "logs".
+ },
+ "papertrail": {
+ "endpoint":
+ "str" # Papertrail syslog endpoint. Required.
+ }
+ }
+ ],
+ "name": "str", # Optional. The name.
+ Must be unique across all components within the same app.
+ "run_command": "str", # Optional. An
+ optional run command to override the component's default.
+ "source_dir": "str", # Optional. An
+ optional path to the working directory to use for the build.
+ For Dockerfile builds, this will be used as the build
+ context. Must be relative to the root of the repo.
+ "termination": {
+ "grace_period_seconds": 0 #
+ Optional. The number of seconds to wait between sending a
+ TERM signal to a container and issuing a KILL which
+ causes immediate shutdown. (Default 120).
}
}
- ],
- "name": "str", # Optional. The name. Must be unique
- across all components within the same app.
- "run_command": "str", # Optional. An optional run
- command to override the component's default.
- "source_dir": "str", # Optional. An optional path to
- the working directory to use for the build. For Dockerfile builds,
- this will be used as the build context. Must be relative to the root
- of the repo.
- "termination": {
- "grace_period_seconds": 0 # Optional. The
- number of seconds to wait between sending a TERM signal to a
- container and issuing a KILL which causes immediate shutdown.
- (Default 120).
+ ]
+ },
+ "static_sites": [
+ {
+ "name": "str", # Optional. The name of this
+ static site.
+ "source_commit_hash": "str" # Optional. The
+ commit hash of the repository that was used to build this static
+ site.
}
- }
- ]
+ ],
+ "tier_slug": "str", # Optional. The current pricing tier
+ slug of the deployment.
+ "updated_at": "2020-02-20 00:00:00", # Optional. When the
+ deployment was last updated.
+ "workers": [
+ {
+ "name": "str", # Optional. The name of this
+ worker.
+ "source_commit_hash": "str" # Optional. The
+ commit hash of the repository that was used to build this worker.
+ }
+ ]
+ },
+ "deployment_id": "str", # Optional. For deployment events, this is
+ the same as the deployment's ID. For autoscaling events, this is the
+ deployment that was autoscaled.
+ "id": "str", # Optional. The ID of the event (UUID).
+ "type": "str" # Optional. The type of event. Known values are:
+ "UNKNOWN", "DEPLOYMENT", and "AUTOSCALING".
}
}
+ # response body for status code(s): 404
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
"""
+ error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ 401: cast(
+ Type[HttpResponseError],
+ lambda response: ClientAuthenticationError(response=response),
+ ),
+ 429: HttpResponseError,
+ 500: HttpResponseError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
- @overload
- def validate_app_spec(
- self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
- ) -> JSON:
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = kwargs.pop("params", {}) or {}
+
+ cls: ClsType[JSON] = kwargs.pop("cls", None)
+
+ _request = build_apps_get_event_request(
+ app_id=app_id,
+ event_id=event_id,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = (
+ self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 404]:
+ if _stream:
+ response.read() # Load the body in memory and close the socket
+ map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
+ raise HttpResponseError(response=response)
+
+ response_headers = {}
+ if response.status_code == 200:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if response.status_code == 404:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if cls:
+ return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
+
+ return cast(JSON, deserialized) # type: ignore
+
+ @distributed_trace
+ def cancel_event(self, app_id: str, event_id: str, **kwargs: Any) -> JSON:
# pylint: disable=line-too-long
- """Propose an App Spec.
+ """Cancel an Event.
- To propose and validate a spec for a new or existing app, send a POST request to the
- ``/v2/apps/propose`` endpoint. The request returns some information about the proposed app,
- including app cost and upgrade cost. If an existing app ID is specified, the app spec is
- treated as a proposed update to the existing app.
+ Cancel an in-progress autoscaling event.
- :param body: Required.
- :type body: IO[bytes]
- :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
- Default value is "application/json".
- :paramtype content_type: str
+ :param app_id: The app ID. Required.
+ :type app_id: str
+ :param event_id: The event ID. Required.
+ :type event_id: str
:return: JSON object
:rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
@@ -102905,1307 +104260,1706 @@ def validate_app_spec(
# response body for status code(s): 200
response == {
- "app_cost": 0, # Optional. The monthly cost of the proposed app in USD.
- "app_is_static": bool, # Optional. Indicates whether the app is a static
- app.
- "app_name_available": bool, # Optional. Indicates whether the app name is
- available.
- "app_name_suggestion": "str", # Optional. The suggested name if the proposed
- app name is unavailable.
- "app_tier_downgrade_cost": 0, # Optional. The monthly cost of the proposed
- app in USD using the previous pricing plan tier. For example, if you propose an
- app that uses the Professional tier, the ``app_tier_downgrade_cost`` field
- displays the monthly cost of the app if it were to use the Basic tier. If the
- proposed app already uses the lest expensive tier, the field is empty.
- "existing_static_apps": "str", # Optional. The maximum number of free static
- apps the account can have. We will charge you for any additional static apps.
- "spec": {
- "name": "str", # The name of the app. Must be unique across all apps
- in the same account. Required.
- "databases": [
- {
- "name": "str", # The database's name. The name must
- be unique across all components within the same app and cannot use
- capital letters. Required.
- "cluster_name": "str", # Optional. The name of the
- underlying DigitalOcean DBaaS cluster. This is required for
- production databases. For dev databases, if cluster_name is not set,
- a new cluster will be provisioned.
- "db_name": "str", # Optional. The name of the MySQL
- or PostgreSQL database to configure.
- "db_user": "str", # Optional. The name of the MySQL
- or PostgreSQL user to configure.
- "engine": "UNSET", # Optional. Default value is
- "UNSET". * MYSQL: MySQL * PG: PostgreSQL * REDIS: Caching * MONGODB:
- MongoDB * KAFKA: Kafka * OPENSEARCH: OpenSearch * VALKEY: ValKey.
- Known values are: "UNSET", "MYSQL", "PG", "REDIS", "MONGODB",
- "KAFKA", "OPENSEARCH", and "VALKEY".
- "production": bool, # Optional. Whether this is a
- production or dev database.
- "version": "str" # Optional. The version of the
- database engine.
- }
- ],
- "disable_edge_cache": False, # Optional. Default value is False. ..
- role:: raw-html-m2r(raw) :format: html If set to ``true``"" , the app
- will **not** be cached at the edge (CDN). Enable this option if you want to
- manage CDN configuration yourself"u2014whether by using an external CDN
- provider or by handling static content and caching within your app. This
- setting is also recommended for apps that require real-time data or serve
- dynamic content, such as those using Server-Sent Events (SSE) over GET, or
- hosting an MCP (Model Context Protocol) Server that utilizes SSE.""
- :raw-html-m2r:`
` **Note:** This feature is not available for static site
- components."" :raw-html-m2r:`
` For more information, see `Disable CDN
- Cache
- `_.
- "disable_email_obfuscation": False, # Optional. Default value is
- False. If set to ``true``"" , email addresses in the app will not be
- obfuscated. This is useful for apps that require email addresses to be
- visible (in the HTML markup).
- "domains": [
- {
- "domain": "str", # The hostname for the domain.
- Required.
- "minimum_tls_version": "str", # Optional. The
- minimum version of TLS a client application can use to access
- resources for the domain. Must be one of the following values
- wrapped within quotations: ``"1.2"`` or ``"1.3"``. Known values are:
- "1.2" and "1.3".
- "type": "UNSPECIFIED", # Optional. Default value is
- "UNSPECIFIED". * DEFAULT: The default ``.ondigitalocean.app`` domain
- assigned to this app * PRIMARY: The primary domain for this app that
- is displayed as the default in the control panel, used in bindable
- environment variables, and any other places that reference an app's
- live URL. Only one domain may be set as primary. * ALIAS: A
- non-primary domain. Known values are: "UNSPECIFIED", "DEFAULT",
- "PRIMARY", and "ALIAS".
- "wildcard": bool, # Optional. Indicates whether the
- domain includes all sub-domains, in addition to the given domain.
- "zone": "str" # Optional. Optional. If the domain
- uses DigitalOcean DNS and you would like App Platform to
- automatically manage it for you, set this to the name of the domain
- on your account. For example, If the domain you are adding is
- ``app.domain.com``"" , the zone could be ``domain.com``.
- }
- ],
- "egress": {
- "type": "AUTOASSIGN" # Optional. Default value is
- "AUTOASSIGN". The app egress type. Known values are: "AUTOASSIGN" and
- "DEDICATED_IP".
+ "event": {
+ "autoscaling": {
+ "components": {
+ "str": {
+ "from": 0, # Optional. The number of
+ replicas before scaling.
+ "to": 0, # Optional. The number of replicas
+ after scaling.
+ "triggering_metric": "str" # Optional. The
+ metric that triggered the scale change. Known values are "cpu",
+ "requests_per_second", "request_duration". For inactivity sleep,
+ "scale_from_zero" and "scale_to_zero" are used.
+ }
+ },
+ "phase": "str" # Optional. The current phase of the
+ autoscaling event. Known values are: "UNKNOWN", "PENDING", "IN_PROGRESS",
+ "SUCCEEDED", "FAILED", and "CANCELED".
},
- "enhanced_threat_control_enabled": False, # Optional. Default value
- is False. If set to ``true``"" , suspicious requests will go through
- additional security checks to help mitigate layer 7 DDoS attacks.
- "functions": [
- {
- "name": "str", # The name. Must be unique across all
- components within the same app. Required.
- "alerts": [
+ "created_at": "2020-02-20 00:00:00", # Optional. When the event was
+ created.
+ "deployment": {
+ "cause": "str", # Optional. What caused this deployment to
+ be created.
+ "cloned_from": "str", # Optional. The ID of a previous
+ deployment that this deployment was cloned from.
+ "created_at": "2020-02-20 00:00:00", # Optional. The
+ creation time of the deployment.
+ "functions": [
+ {
+ "name": "str", # Optional. The name of this
+ functions component.
+ "namespace": "str", # Optional. The
+ namespace where the functions are deployed.
+ "source_commit_hash": "str" # Optional. The
+ commit hash of the repository that was used to build this
+ functions component.
+ }
+ ],
+ "id": "str", # Optional. The ID of the deployment.
+ "jobs": [
+ {
+ "name": "str", # Optional. The name of this
+ job.
+ "source_commit_hash": "str" # Optional. The
+ commit hash of the repository that was used to build this job.
+ }
+ ],
+ "phase": "UNKNOWN", # Optional. Default value is "UNKNOWN".
+ Known values are: "UNKNOWN", "PENDING_BUILD", "BUILDING",
+ "PENDING_DEPLOY", "DEPLOYING", "ACTIVE", "SUPERSEDED", "ERROR", and
+ "CANCELED".
+ "phase_last_updated_at": "2020-02-20 00:00:00", # Optional.
+ When the deployment phase was last updated.
+ "progress": {
+ "error_steps": 0, # Optional. Number of unsuccessful
+ steps.
+ "pending_steps": 0, # Optional. Number of pending
+ steps.
+ "running_steps": 0, # Optional. Number of currently
+ running steps.
+ "steps": [
{
- "disabled": bool, # Optional. Is the
- alert disabled?.
- "operator": "UNSPECIFIED_OPERATOR",
- # Optional. Default value is "UNSPECIFIED_OPERATOR". Known
- values are: "UNSPECIFIED_OPERATOR", "GREATER_THAN", and
- "LESS_THAN".
- "rule": "UNSPECIFIED_RULE", #
- Optional. Default value is "UNSPECIFIED_RULE". Known values
- are: "UNSPECIFIED_RULE", "CPU_UTILIZATION",
- "MEM_UTILIZATION", "RESTART_COUNT", "DEPLOYMENT_FAILED",
- "DEPLOYMENT_LIVE", "DOMAIN_FAILED", "DOMAIN_LIVE",
- "AUTOSCALE_FAILED", "AUTOSCALE_SUCCEEDED",
- "FUNCTIONS_ACTIVATION_COUNT",
- "FUNCTIONS_AVERAGE_DURATION_MS",
- "FUNCTIONS_ERROR_RATE_PER_MINUTE",
- "FUNCTIONS_AVERAGE_WAIT_TIME_MS", "FUNCTIONS_ERROR_COUNT",
- and "FUNCTIONS_GB_RATE_PER_SECOND".
- "value": 0.0, # Optional. Threshold
- value for alert.
- "window": "UNSPECIFIED_WINDOW" #
- Optional. Default value is "UNSPECIFIED_WINDOW". Known values
- are: "UNSPECIFIED_WINDOW", "FIVE_MINUTES", "TEN_MINUTES",
- "THIRTY_MINUTES", and "ONE_HOUR".
+ "component_name": "str", # Optional.
+ The component name that this step is associated with.
+ "ended_at": "2020-02-20 00:00:00", #
+ Optional. The end time of this step.
+ "message_base": "str", # Optional.
+ The base of a human-readable description of the step intended
+ to be combined with the component name for presentation. For
+ example: ``message_base`` = "Building service"
+ ``component_name`` = "api".
+ "name": "str", # Optional. The name
+ of this step.
+ "reason": {
+ "code": "str", # Optional.
+ The error code.
+ "message": "str" # Optional.
+ The error message.
+ },
+ "started_at": "2020-02-20 00:00:00",
+ # Optional. The start time of this step.
+ "status": "UNKNOWN", # Optional.
+ Default value is "UNKNOWN". Known values are: "UNKNOWN",
+ "PENDING", "RUNNING", "ERROR", and "SUCCESS".
+ "steps": [
+ {} # Optional. Child steps
+ of this step.
+ ]
}
],
- "bitbucket": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "deploy_on_push": bool, # Optional. Whether
- to automatically deploy new commits made to the repo.
- "repo": "str" # Optional. The name of the
- repo in the format owner/repo. Example:
- ``digitalocean/sample-golang``.
- },
- "cors": {
- "allow_credentials": bool, # Optional.
- Whether browsers should expose the response to the client-side
- JavaScript code when the request"u2019s credentials mode is
- include. This configures the ``Access-Control-Allow-Credentials``
- header.
- "allow_headers": [
- "str" # Optional. The set of allowed
- HTTP request headers. This configures the
- ``Access-Control-Allow-Headers`` header.
- ],
- "allow_methods": [
- "str" # Optional. The set of allowed
- HTTP methods. This configures the
- ``Access-Control-Allow-Methods`` header.
- ],
- "allow_origins": [
- {
- "exact": "str", # Optional.
- Exact string match. Only 1 of ``exact``"" , ``prefix``""
- , or ``regex`` must be set.
- "prefix": "str", # Optional.
- Prefix-based match. Only 1 of ``exact``"" , ``prefix``""
- , or ``regex`` must be set.
- "regex": "str" # Optional.
- RE2 style regex-based match. Only 1 of ``exact``"" ,
- ``prefix``"" , or ``regex`` must be set. For more
- information about RE2 syntax, see:
- https://github.com/google/re2/wiki/Syntax.
- }
- ],
- "expose_headers": [
- "str" # Optional. The set of HTTP
- response headers that browsers are allowed to access. This
- configures the ``Access-Control-Expose-Headers`` header.
- ],
- "max_age": "str" # Optional. An optional
- duration specifying how long browsers can cache the results of a
- preflight request. This configures the ``Access-Control-Max-Age``
- header.
- },
- "envs": [
+ "success_steps": 0, # Optional. Number of successful
+ steps.
+ "summary_steps": [
{
- "key": "str", # The variable name.
- Required.
- "scope": "RUN_AND_BUILD_TIME", #
- Optional. Default value is "RUN_AND_BUILD_TIME". * RUN_TIME:
- Made available only at run-time * BUILD_TIME: Made available
- only at build-time * RUN_AND_BUILD_TIME: Made available at
- both build and run-time. Known values are: "UNSET",
- "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME".
- "type": "GENERAL", # Optional.
- Default value is "GENERAL". * GENERAL: A plain-text
- environment variable * SECRET: A secret encrypted environment
- variable. Known values are: "GENERAL" and "SECRET".
- "value": "str" # Optional. The
- value. If the type is ``SECRET``"" , the value will be
- encrypted on first submission. On following submissions, the
- encrypted value should be used.
+ "component_name": "str", # Optional.
+ The component name that this step is associated with.
+ "ended_at": "2020-02-20 00:00:00", #
+ Optional. The end time of this step.
+ "message_base": "str", # Optional.
+ The base of a human-readable description of the step intended
+ to be combined with the component name for presentation. For
+ example: ``message_base`` = "Building service"
+ ``component_name`` = "api".
+ "name": "str", # Optional. The name
+ of this step.
+ "reason": {
+ "code": "str", # Optional.
+ The error code.
+ "message": "str" # Optional.
+ The error message.
+ },
+ "started_at": "2020-02-20 00:00:00",
+ # Optional. The start time of this step.
+ "status": "UNKNOWN", # Optional.
+ Default value is "UNKNOWN". Known values are: "UNKNOWN",
+ "PENDING", "RUNNING", "ERROR", and "SUCCESS".
+ "steps": [
+ {} # Optional. Child steps
+ of this step.
+ ]
}
],
- "git": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "repo_clone_url": "str" # Optional. The
- clone URL of the repo. Example:
- ``https://github.com/digitalocean/sample-golang.git``.
- },
- "github": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "deploy_on_push": bool, # Optional. Whether
- to automatically deploy new commits made to the repo.
- "repo": "str" # Optional. The name of the
- repo in the format owner/repo. Example:
- ``digitalocean/sample-golang``.
- },
- "gitlab": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "deploy_on_push": bool, # Optional. Whether
- to automatically deploy new commits made to the repo.
- "repo": "str" # Optional. The name of the
- repo in the format owner/repo. Example:
- ``digitalocean/sample-golang``.
- },
- "log_destinations": [
+ "total_steps": 0 # Optional. Total number of steps.
+ },
+ "services": [
+ {
+ "name": "str", # Optional. The name of this
+ service.
+ "source_commit_hash": "str" # Optional. The
+ commit hash of the repository that was used to build this
+ service.
+ }
+ ],
+ "spec": {
+ "name": "str", # The name of the app. Must be unique
+ across all apps in the same account. Required.
+ "databases": [
{
- "name": "str", # Required.
- "datadog": {
- "api_key": "str", # Datadog
- API key. Required.
- "endpoint": "str" #
- Optional. Datadog HTTP log intake endpoint.
- },
- "logtail": {
- "token": "str" # Optional.
- Logtail token.
- },
- "open_search": {
- "basic_auth": {
- "password": "str", #
- Optional. Password for user defined in User. Is
- required when ``endpoint`` is set. Cannot be set if
- using a DigitalOcean DBaaS OpenSearch cluster.
- "user": "str" #
- Optional. Username to authenticate with. Only
- required when ``endpoint`` is set. Defaults to
- ``doadmin`` when ``cluster_name`` is set.
- },
- "cluster_name": "str", #
- Optional. The name of a DigitalOcean DBaaS OpenSearch
- cluster to use as a log forwarding destination. Cannot be
- specified if ``endpoint`` is also specified.
- "endpoint": "str", #
- Optional. OpenSearch API Endpoint. Only HTTPS is
- supported. Format: https://:code:``::code:``.
- Cannot be specified if ``cluster_name`` is also
- specified.
- "index_name": "logs" #
- Optional. Default value is "logs". The index name to use
- for the logs. If not set, the default index name is
- "logs".
- },
- "papertrail": {
- "endpoint": "str" #
- Papertrail syslog endpoint. Required.
- }
+ "name": "str", # The database's
+ name. The name must be unique across all components within
+ the same app and cannot use capital letters. Required.
+ "cluster_name": "str", # Optional.
+ The name of the underlying DigitalOcean DBaaS cluster. This
+ is required for production databases. For dev databases, if
+ cluster_name is not set, a new cluster will be provisioned.
+ "db_name": "str", # Optional. The
+ name of the MySQL or PostgreSQL database to configure.
+ "db_user": "str", # Optional. The
+ name of the MySQL or PostgreSQL user to configure.
+ "engine": "UNSET", # Optional.
+ Default value is "UNSET". * MYSQL: MySQL * PG: PostgreSQL *
+ REDIS: Caching * MONGODB: MongoDB * KAFKA: Kafka *
+ OPENSEARCH: OpenSearch * VALKEY: ValKey. Known values are:
+ "UNSET", "MYSQL", "PG", "REDIS", "MONGODB", "KAFKA",
+ "OPENSEARCH", and "VALKEY".
+ "production": bool, # Optional.
+ Whether this is a production or dev database.
+ "version": "str" # Optional. The
+ version of the database engine.
}
],
- "routes": [
+ "disable_edge_cache": False, # Optional. Default
+ value is False. .. role:: raw-html-m2r(raw) :format: html If set
+ to ``true``"" , the app will **not** be cached at the edge (CDN).
+ Enable this option if you want to manage CDN configuration
+ yourself"u2014whether by using an external CDN provider or by
+ handling static content and caching within your app. This setting is
+ also recommended for apps that require real-time data or serve
+ dynamic content, such as those using Server-Sent Events (SSE) over
+ GET, or hosting an MCP (Model Context Protocol) Server that utilizes
+ SSE."" :raw-html-m2r:`
` **Note:** This feature is not available
+ for static site components."" :raw-html-m2r:`
` For more
+ information, see `Disable CDN Cache
+ `_.
+ "disable_email_obfuscation": False, # Optional.
+ Default value is False. If set to ``true``"" , email addresses in the
+ app will not be obfuscated. This is useful for apps that require
+ email addresses to be visible (in the HTML markup).
+ "domains": [
{
- "path": "str", # Optional.
- (Deprecated - Use Ingress Rules instead). An HTTP path
- prefix. Paths must start with / and must be unique across all
- components within an app.
- "preserve_path_prefix": bool #
- Optional. An optional flag to preserve the path that is
- forwarded to the backend service. By default, the HTTP
- request path will be trimmed from the left when forwarded to
- the component. For example, a component with ``path=/api``
- will have requests to ``/api/list`` trimmed to ``/list``. If
- this value is ``true``"" , the path will remain
- ``/api/list``.
+ "domain": "str", # The hostname for
+ the domain. Required.
+ "minimum_tls_version": "str", #
+ Optional. The minimum version of TLS a client application can
+ use to access resources for the domain. Must be one of the
+ following values wrapped within quotations: ``"1.2"`` or
+ ``"1.3"``. Known values are: "1.2" and "1.3".
+ "type": "UNSPECIFIED", # Optional.
+ Default value is "UNSPECIFIED". * DEFAULT: The default
+ ``.ondigitalocean.app`` domain assigned to this app *
+ PRIMARY: The primary domain for this app that is displayed as
+ the default in the control panel, used in bindable
+ environment variables, and any other places that reference an
+ app's live URL. Only one domain may be set as primary. *
+ ALIAS: A non-primary domain. Known values are: "UNSPECIFIED",
+ "DEFAULT", "PRIMARY", and "ALIAS".
+ "wildcard": bool, # Optional.
+ Indicates whether the domain includes all sub-domains, in
+ addition to the given domain.
+ "zone": "str" # Optional. Optional.
+ If the domain uses DigitalOcean DNS and you would like App
+ Platform to automatically manage it for you, set this to the
+ name of the domain on your account. For example, If the
+ domain you are adding is ``app.domain.com``"" , the zone
+ could be ``domain.com``.
}
],
- "source_dir": "str" # Optional. An optional path to
- the working directory to use for the build. For Dockerfile builds,
- this will be used as the build context. Must be relative to the root
- of the repo.
- }
- ],
- "ingress": {
- "rules": [
- {
- "component": {
- "name": "str", # The name of the
- component to route to. Required.
- "preserve_path_prefix": "str", #
- Optional. An optional flag to preserve the path that is
- forwarded to the backend service. By default, the HTTP
- request path will be trimmed from the left when forwarded to
- the component. For example, a component with ``path=/api``
- will have requests to ``/api/list`` trimmed to ``/list``. If
- this value is ``true``"" , the path will remain
- ``/api/list``. Note: this is not applicable for Functions
- Components and is mutually exclusive with ``rewrite``.
- "rewrite": "str" # Optional. An
- optional field that will rewrite the path of the component to
- be what is specified here. By default, the HTTP request path
- will be trimmed from the left when forwarded to the
- component. For example, a component with ``path=/api`` will
- have requests to ``/api/list`` trimmed to ``/list``. If you
- specified the rewrite to be ``/v1/``"" , requests to
- ``/api/list`` would be rewritten to ``/v1/list``. Note: this
- is mutually exclusive with ``preserve_path_prefix``.
- },
- "cors": {
- "allow_credentials": bool, #
- Optional. Whether browsers should expose the response to the
- client-side JavaScript code when the request"u2019s
- credentials mode is include. This configures the
- ``Access-Control-Allow-Credentials`` header.
- "allow_headers": [
- "str" # Optional. The set of
- allowed HTTP request headers. This configures the
- ``Access-Control-Allow-Headers`` header.
+ "egress": {
+ "type": "AUTOASSIGN" # Optional. Default
+ value is "AUTOASSIGN". The app egress type. Known values are:
+ "AUTOASSIGN" and "DEDICATED_IP".
+ },
+ "enhanced_threat_control_enabled": False, #
+ Optional. Default value is False. If set to ``true``"" , suspicious
+ requests will go through additional security checks to help mitigate
+ layer 7 DDoS attacks.
+ "functions": [
+ {
+ "name": "str", # The name. Must be
+ unique across all components within the same app. Required.
+ "alerts": [
+ {
+ "disabled": bool, #
+ Optional. Is the alert disabled?.
+ "operator":
+ "UNSPECIFIED_OPERATOR", # Optional. Default value is
+ "UNSPECIFIED_OPERATOR". Known values are:
+ "UNSPECIFIED_OPERATOR", "GREATER_THAN", and
+ "LESS_THAN".
+ "rule":
+ "UNSPECIFIED_RULE", # Optional. Default value is
+ "UNSPECIFIED_RULE". Known values are:
+ "UNSPECIFIED_RULE", "CPU_UTILIZATION",
+ "MEM_UTILIZATION", "RESTART_COUNT",
+ "DEPLOYMENT_FAILED", "DEPLOYMENT_LIVE",
+ "DOMAIN_FAILED", "DOMAIN_LIVE", "AUTOSCALE_FAILED",
+ "AUTOSCALE_SUCCEEDED", "FUNCTIONS_ACTIVATION_COUNT",
+ "FUNCTIONS_AVERAGE_DURATION_MS",
+ "FUNCTIONS_ERROR_RATE_PER_MINUTE",
+ "FUNCTIONS_AVERAGE_WAIT_TIME_MS",
+ "FUNCTIONS_ERROR_COUNT", and
+ "FUNCTIONS_GB_RATE_PER_SECOND".
+ "value": 0.0, #
+ Optional. Threshold value for alert.
+ "window":
+ "UNSPECIFIED_WINDOW" # Optional. Default value is
+ "UNSPECIFIED_WINDOW". Known values are:
+ "UNSPECIFIED_WINDOW", "FIVE_MINUTES", "TEN_MINUTES",
+ "THIRTY_MINUTES", and "ONE_HOUR".
+ }
],
- "allow_methods": [
- "str" # Optional. The set of
- allowed HTTP methods. This configures the
- ``Access-Control-Allow-Methods`` header.
+ "bitbucket": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "deploy_on_push": bool, #
+ Optional. Whether to automatically deploy new commits
+ made to the repo.
+ "repo": "str" # Optional.
+ The name of the repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
+ },
+ "cors": {
+ "allow_credentials": bool, #
+ Optional. Whether browsers should expose the response to
+ the client-side JavaScript code when the request"u2019s
+ credentials mode is include. This configures the
+ ``Access-Control-Allow-Credentials`` header.
+ "allow_headers": [
+ "str" # Optional.
+ The set of allowed HTTP request headers. This
+ configures the ``Access-Control-Allow-Headers``
+ header.
+ ],
+ "allow_methods": [
+ "str" # Optional.
+ The set of allowed HTTP methods. This configures the
+ ``Access-Control-Allow-Methods`` header.
+ ],
+ "allow_origins": [
+ {
+ "exact":
+ "str", # Optional. Exact string match. Only 1 of
+ ``exact``"" , ``prefix``"" , or ``regex`` must be
+ set.
+ "prefix":
+ "str", # Optional. Prefix-based match. Only 1 of
+ ``exact``"" , ``prefix``"" , or ``regex`` must be
+ set.
+ "regex":
+ "str" # Optional. RE2 style regex-based match.
+ Only 1 of ``exact``"" , ``prefix``"" , or
+ ``regex`` must be set. For more information about
+ RE2 syntax, see:
+ https://github.com/google/re2/wiki/Syntax.
+ }
+ ],
+ "expose_headers": [
+ "str" # Optional.
+ The set of HTTP response headers that browsers are
+ allowed to access. This configures the
+ ``Access-Control-Expose-Headers`` header.
+ ],
+ "max_age": "str" # Optional.
+ An optional duration specifying how long browsers can
+ cache the results of a preflight request. This configures
+ the ``Access-Control-Max-Age`` header.
+ },
+ "envs": [
+ {
+ "key": "str", # The
+ variable name. Required.
+ "scope":
+ "RUN_AND_BUILD_TIME", # Optional. Default value is
+ "RUN_AND_BUILD_TIME". * RUN_TIME: Made available only
+ at run-time * BUILD_TIME: Made available only at
+ build-time * RUN_AND_BUILD_TIME: Made available at
+ both build and run-time. Known values are: "UNSET",
+ "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME".
+ "type": "GENERAL", #
+ Optional. Default value is "GENERAL". * GENERAL: A
+ plain-text environment variable * SECRET: A secret
+ encrypted environment variable. Known values are:
+ "GENERAL" and "SECRET".
+ "value": "str" #
+ Optional. The value. If the type is ``SECRET``"" ,
+ the value will be encrypted on first submission. On
+ following submissions, the encrypted value should be
+ used.
+ }
],
- "allow_origins": [
+ "git": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "repo_clone_url": "str" #
+ Optional. The clone URL of the repo. Example:
+ ``https://github.com/digitalocean/sample-golang.git``.
+ },
+ "github": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "deploy_on_push": bool, #
+ Optional. Whether to automatically deploy new commits
+ made to the repo.
+ "repo": "str" # Optional.
+ The name of the repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
+ },
+ "gitlab": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "deploy_on_push": bool, #
+ Optional. Whether to automatically deploy new commits
+ made to the repo.
+ "repo": "str" # Optional.
+ The name of the repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
+ },
+ "log_destinations": [
{
- "exact": "str", #
- Optional. Exact string match. Only 1 of ``exact``"" ,
- ``prefix``"" , or ``regex`` must be set.
- "prefix": "str", #
- Optional. Prefix-based match. Only 1 of ``exact``"" ,
- ``prefix``"" , or ``regex`` must be set.
- "regex": "str" #
- Optional. RE2 style regex-based match. Only 1 of
- ``exact``"" , ``prefix``"" , or ``regex`` must be
- set. For more information about RE2 syntax, see:
- https://github.com/google/re2/wiki/Syntax.
+ "name": "str", #
+ Required.
+ "datadog": {
+ "api_key":
+ "str", # Datadog API key. Required.
+ "endpoint":
+ "str" # Optional. Datadog HTTP log intake
+ endpoint.
+ },
+ "logtail": {
+ "token":
+ "str" # Optional. Logtail token.
+ },
+ "open_search": {
+ "basic_auth":
+ {
+ "password": "str", # Optional. Password for
+ user defined in User. Is required when
+ ``endpoint`` is set. Cannot be set if using a
+ DigitalOcean DBaaS OpenSearch cluster.
+ "user": "str" # Optional. Username to
+ authenticate with. Only required when
+ ``endpoint`` is set. Defaults to ``doadmin``
+ when ``cluster_name`` is set.
+ },
+ "cluster_name": "str", # Optional. The name of a
+ DigitalOcean DBaaS OpenSearch cluster to use as a
+ log forwarding destination. Cannot be specified
+ if ``endpoint`` is also specified.
+ "endpoint":
+ "str", # Optional. OpenSearch API Endpoint. Only
+ HTTPS is supported. Format:
+ https://:code:``::code:``. Cannot be
+ specified if ``cluster_name`` is also specified.
+ "index_name":
+ "logs" # Optional. Default value is "logs". The
+ index name to use for the logs. If not set, the
+ default index name is "logs".
+ },
+ "papertrail": {
+ "endpoint":
+ "str" # Papertrail syslog endpoint. Required.
+ }
}
],
- "expose_headers": [
- "str" # Optional. The set of
- HTTP response headers that browsers are allowed to
- access. This configures the
- ``Access-Control-Expose-Headers`` header.
+ "routes": [
+ {
+ "path": "str", #
+ Optional. (Deprecated - Use Ingress Rules instead).
+ An HTTP path prefix. Paths must start with / and must
+ be unique across all components within an app.
+ "preserve_path_prefix": bool # Optional. An optional
+ flag to preserve the path that is forwarded to the
+ backend service. By default, the HTTP request path
+ will be trimmed from the left when forwarded to the
+ component. For example, a component with
+ ``path=/api`` will have requests to ``/api/list``
+ trimmed to ``/list``. If this value is ``true``"" ,
+ the path will remain ``/api/list``.
+ }
],
- "max_age": "str" # Optional. An
- optional duration specifying how long browsers can cache the
- results of a preflight request. This configures the
- ``Access-Control-Max-Age`` header.
- },
- "match": {
- "authority": {
- "exact": "str" # Required.
- },
- "path": {
- "prefix": "str" #
- Prefix-based match. For example, ``/api`` will match
- ``/api``"" , ``/api/``"" , and any nested paths such as
- ``/api/v1/endpoint``. Required.
- }
- },
- "redirect": {
- "authority": "str", # Optional. The
- authority/host to redirect to. This can be a hostname or IP
- address. Note: use ``port`` to set the port.
- "port": 0, # Optional. The port to
- redirect to.
- "redirect_code": 0, # Optional. The
- redirect code to use. Defaults to ``302``. Supported values
- are 300, 301, 302, 303, 304, 307, 308.
- "scheme": "str", # Optional. The
- scheme to redirect to. Supported values are ``http`` or
- ``https``. Default: ``https``.
- "uri": "str" # Optional. An optional
- URI path to redirect to. Note: if this is specified the whole
- URI of the original request will be overwritten to this
- value, irrespective of the original request URI being
- matched.
- }
- }
- ]
- },
- "jobs": [
- {
- "autoscaling": {
- "max_instance_count": 0, # Optional. The
- maximum amount of instances for this component. Must be more than
- min_instance_count.
- "metrics": {
- "cpu": {
- "percent": 80 # Optional.
- Default value is 80. The average target CPU utilization
- for the component.
- }
- },
- "min_instance_count": 0 # Optional. The
- minimum amount of instances for this component. Must be less than
- max_instance_count.
- },
- "bitbucket": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "deploy_on_push": bool, # Optional. Whether
- to automatically deploy new commits made to the repo.
- "repo": "str" # Optional. The name of the
- repo in the format owner/repo. Example:
- ``digitalocean/sample-golang``.
- },
- "build_command": "str", # Optional. An optional
- build command to run while building this component from source.
- "dockerfile_path": "str", # Optional. The path to
- the Dockerfile relative to the root of the repo. If set, it will be
- used to build this component. Otherwise, App Platform will attempt to
- build it using buildpacks.
- "environment_slug": "str", # Optional. An
- environment slug describing the type of this app. For a full list,
- please refer to `the product documentation
- `_.
- "envs": [
- {
- "key": "str", # The variable name.
- Required.
- "scope": "RUN_AND_BUILD_TIME", #
- Optional. Default value is "RUN_AND_BUILD_TIME". * RUN_TIME:
- Made available only at run-time * BUILD_TIME: Made available
- only at build-time * RUN_AND_BUILD_TIME: Made available at
- both build and run-time. Known values are: "UNSET",
- "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME".
- "type": "GENERAL", # Optional.
- Default value is "GENERAL". * GENERAL: A plain-text
- environment variable * SECRET: A secret encrypted environment
- variable. Known values are: "GENERAL" and "SECRET".
- "value": "str" # Optional. The
- value. If the type is ``SECRET``"" , the value will be
- encrypted on first submission. On following submissions, the
- encrypted value should be used.
+ "source_dir": "str" # Optional. An
+ optional path to the working directory to use for the build.
+ For Dockerfile builds, this will be used as the build
+ context. Must be relative to the root of the repo.
}
],
- "git": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "repo_clone_url": "str" # Optional. The
- clone URL of the repo. Example:
- ``https://github.com/digitalocean/sample-golang.git``.
- },
- "github": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "deploy_on_push": bool, # Optional. Whether
- to automatically deploy new commits made to the repo.
- "repo": "str" # Optional. The name of the
- repo in the format owner/repo. Example:
- ``digitalocean/sample-golang``.
- },
- "gitlab": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "deploy_on_push": bool, # Optional. Whether
- to automatically deploy new commits made to the repo.
- "repo": "str" # Optional. The name of the
- repo in the format owner/repo. Example:
- ``digitalocean/sample-golang``.
- },
- "image": {
- "deploy_on_push": {
- "enabled": bool # Optional. Whether
- to automatically deploy new images. Can only be used for
- images hosted in DOCR and can only be used with an image tag,
- not a specific digest.
- },
- "digest": "str", # Optional. The image
- digest. Cannot be specified if tag is provided.
- "registry": "str", # Optional. The registry
- name. Must be left empty for the ``DOCR`` registry type.
- "registry_credentials": "str", # Optional.
- The credentials to be able to pull the image. The value will be
- encrypted on first submission. On following submissions, the
- encrypted value should be used. * "$username:$access_token" for
- registries of type ``DOCKER_HUB``. * "$username:$access_token"
- for registries of type ``GHCR``.
- "registry_type": "str", # Optional. *
- DOCKER_HUB: The DockerHub container registry type. * DOCR: The
- DigitalOcean container registry type. * GHCR: The Github
- container registry type. Known values are: "DOCKER_HUB", "DOCR",
- and "GHCR".
- "repository": "str", # Optional. The
- repository name.
- "tag": "latest" # Optional. Default value is
- "latest". The repository tag. Defaults to ``latest`` if not
- provided and no digest is provided. Cannot be specified if digest
- is provided.
+ "ingress": {
+ "rules": [
+ {
+ "component": {
+ "name": "str", # The
+ name of the component to route to. Required.
+ "preserve_path_prefix": "str", # Optional. An
+ optional flag to preserve the path that is forwarded
+ to the backend service. By default, the HTTP request
+ path will be trimmed from the left when forwarded to
+ the component. For example, a component with
+ ``path=/api`` will have requests to ``/api/list``
+ trimmed to ``/list``. If this value is ``true``"" ,
+ the path will remain ``/api/list``. Note: this is not
+ applicable for Functions Components and is mutually
+ exclusive with ``rewrite``.
+ "rewrite": "str" #
+ Optional. An optional field that will rewrite the
+ path of the component to be what is specified here.
+ By default, the HTTP request path will be trimmed
+ from the left when forwarded to the component. For
+ example, a component with ``path=/api`` will have
+ requests to ``/api/list`` trimmed to ``/list``. If
+ you specified the rewrite to be ``/v1/``"" , requests
+ to ``/api/list`` would be rewritten to ``/v1/list``.
+ Note: this is mutually exclusive with
+ ``preserve_path_prefix``.
+ },
+ "cors": {
+ "allow_credentials":
+ bool, # Optional. Whether browsers should expose the
+ response to the client-side JavaScript code when the
+ request"u2019s credentials mode is include. This
+ configures the ``Access-Control-Allow-Credentials``
+ header.
+ "allow_headers": [
+ "str" #
+ Optional. The set of allowed HTTP request
+ headers. This configures the
+ ``Access-Control-Allow-Headers`` header.
+ ],
+ "allow_methods": [
+ "str" #
+ Optional. The set of allowed HTTP methods. This
+ configures the ``Access-Control-Allow-Methods``
+ header.
+ ],
+ "allow_origins": [
+ {
+ "exact": "str", # Optional. Exact string
+ match. Only 1 of ``exact``"" , ``prefix``"" ,
+ or ``regex`` must be set.
+ "prefix": "str", # Optional. Prefix-based
+ match. Only 1 of ``exact``"" , ``prefix``"" ,
+ or ``regex`` must be set.
+ "regex": "str" # Optional. RE2 style
+ regex-based match. Only 1 of ``exact``"" ,
+ ``prefix``"" , or ``regex`` must be set. For
+ more information about RE2 syntax, see:
+ https://github.com/google/re2/wiki/Syntax.
+ }
+ ],
+ "expose_headers": [
+ "str" #
+ Optional. The set of HTTP response headers that
+ browsers are allowed to access. This configures
+ the ``Access-Control-Expose-Headers`` header.
+ ],
+ "max_age": "str" #
+ Optional. An optional duration specifying how long
+ browsers can cache the results of a preflight
+ request. This configures the
+ ``Access-Control-Max-Age`` header.
+ },
+ "match": {
+ "authority": {
+ "exact":
+ "str" # Required.
+ },
+ "path": {
+ "prefix":
+ "str" # Prefix-based match. For example,
+ ``/api`` will match ``/api``"" , ``/api/``"" ,
+ and any nested paths such as
+ ``/api/v1/endpoint``. Required.
+ }
+ },
+ "redirect": {
+ "authority": "str",
+ # Optional. The authority/host to redirect to. This
+ can be a hostname or IP address. Note: use ``port``
+ to set the port.
+ "port": 0, #
+ Optional. The port to redirect to.
+ "redirect_code": 0,
+ # Optional. The redirect code to use. Defaults to
+ ``302``. Supported values are 300, 301, 302, 303,
+ 304, 307, 308.
+ "scheme": "str", #
+ Optional. The scheme to redirect to. Supported values
+ are ``http`` or ``https``. Default: ``https``.
+ "uri": "str" #
+ Optional. An optional URI path to redirect to. Note:
+ if this is specified the whole URI of the original
+ request will be overwritten to this value,
+ irrespective of the original request URI being
+ matched.
+ }
+ }
+ ]
},
- "instance_count": 1, # Optional. Default value is 1.
- The amount of instances that this component should be scaled to.
- Default: 1. Must not be set if autoscaling is used.
- "instance_size_slug": {},
- "kind": "UNSPECIFIED", # Optional. Default value is
- "UNSPECIFIED". * UNSPECIFIED: Default job type, will auto-complete to
- POST_DEPLOY kind. * PRE_DEPLOY: Indicates a job that runs before an
- app deployment. * POST_DEPLOY: Indicates a job that runs after an app
- deployment. * FAILED_DEPLOY: Indicates a job that runs after a
- component fails to deploy. Known values are: "UNSPECIFIED",
- "PRE_DEPLOY", "POST_DEPLOY", and "FAILED_DEPLOY".
- "log_destinations": [
+ "jobs": [
{
- "name": "str", # Required.
- "datadog": {
- "api_key": "str", # Datadog
- API key. Required.
- "endpoint": "str" #
- Optional. Datadog HTTP log intake endpoint.
- },
- "logtail": {
- "token": "str" # Optional.
- Logtail token.
- },
- "open_search": {
- "basic_auth": {
- "password": "str", #
- Optional. Password for user defined in User. Is
- required when ``endpoint`` is set. Cannot be set if
- using a DigitalOcean DBaaS OpenSearch cluster.
- "user": "str" #
- Optional. Username to authenticate with. Only
- required when ``endpoint`` is set. Defaults to
- ``doadmin`` when ``cluster_name`` is set.
+ "autoscaling": {
+ "max_instance_count": 0, #
+ Optional. The maximum amount of instances for this
+ component. Must be more than min_instance_count.
+ "metrics": {
+ "cpu": {
+ "percent": 80
+ # Optional. Default value is 80. The average
+ target CPU utilization for the component.
+ }
},
- "cluster_name": "str", #
- Optional. The name of a DigitalOcean DBaaS OpenSearch
- cluster to use as a log forwarding destination. Cannot be
- specified if ``endpoint`` is also specified.
- "endpoint": "str", #
- Optional. OpenSearch API Endpoint. Only HTTPS is
- supported. Format: https://:code:``::code:``.
- Cannot be specified if ``cluster_name`` is also
- specified.
- "index_name": "logs" #
- Optional. Default value is "logs". The index name to use
- for the logs. If not set, the default index name is
- "logs".
+ "min_instance_count": 0 #
+ Optional. The minimum amount of instances for this
+ component. Must be less than max_instance_count.
},
- "papertrail": {
- "endpoint": "str" #
- Papertrail syslog endpoint. Required.
- }
- }
- ],
- "name": "str", # Optional. The name. Must be unique
- across all components within the same app.
- "run_command": "str", # Optional. An optional run
- command to override the component's default.
- "source_dir": "str", # Optional. An optional path to
- the working directory to use for the build. For Dockerfile builds,
- this will be used as the build context. Must be relative to the root
- of the repo.
- "termination": {
- "grace_period_seconds": 0 # Optional. The
- number of seconds to wait between sending a TERM signal to a
- container and issuing a KILL which causes immediate shutdown.
- (Default 120).
- }
- }
- ],
- "maintenance": {
- "archive": bool, # Optional. Indicates whether the app
- should be archived. Setting this to true implies that enabled is set to
- true.
- "enabled": bool, # Optional. Indicates whether maintenance
- mode should be enabled for the app.
- "offline_page_url": "str" # Optional. A custom offline page
- to display when maintenance mode is enabled or the app is archived.
- },
- "region": "str", # Optional. The slug form of the geographical
- origin of the app. Default: ``nearest available``. Known values are: "atl",
- "nyc", "sfo", "tor", "ams", "fra", "lon", "blr", "sgp", and "syd".
- "services": [
- {
- "autoscaling": {
- "max_instance_count": 0, # Optional. The
- maximum amount of instances for this component. Must be more than
- min_instance_count.
- "metrics": {
- "cpu": {
- "percent": 80 # Optional.
- Default value is 80. The average target CPU utilization
- for the component.
- }
- },
- "min_instance_count": 0 # Optional. The
- minimum amount of instances for this component. Must be less than
- max_instance_count.
- },
- "bitbucket": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "deploy_on_push": bool, # Optional. Whether
- to automatically deploy new commits made to the repo.
- "repo": "str" # Optional. The name of the
- repo in the format owner/repo. Example:
- ``digitalocean/sample-golang``.
- },
- "build_command": "str", # Optional. An optional
- build command to run while building this component from source.
- "cors": {
- "allow_credentials": bool, # Optional.
- Whether browsers should expose the response to the client-side
- JavaScript code when the request"u2019s credentials mode is
- include. This configures the ``Access-Control-Allow-Credentials``
- header.
- "allow_headers": [
- "str" # Optional. The set of allowed
- HTTP request headers. This configures the
- ``Access-Control-Allow-Headers`` header.
- ],
- "allow_methods": [
- "str" # Optional. The set of allowed
- HTTP methods. This configures the
- ``Access-Control-Allow-Methods`` header.
- ],
- "allow_origins": [
- {
- "exact": "str", # Optional.
- Exact string match. Only 1 of ``exact``"" , ``prefix``""
- , or ``regex`` must be set.
- "prefix": "str", # Optional.
- Prefix-based match. Only 1 of ``exact``"" , ``prefix``""
- , or ``regex`` must be set.
- "regex": "str" # Optional.
- RE2 style regex-based match. Only 1 of ``exact``"" ,
- ``prefix``"" , or ``regex`` must be set. For more
- information about RE2 syntax, see:
- https://github.com/google/re2/wiki/Syntax.
- }
- ],
- "expose_headers": [
- "str" # Optional. The set of HTTP
- response headers that browsers are allowed to access. This
- configures the ``Access-Control-Expose-Headers`` header.
- ],
- "max_age": "str" # Optional. An optional
- duration specifying how long browsers can cache the results of a
- preflight request. This configures the ``Access-Control-Max-Age``
- header.
- },
- "dockerfile_path": "str", # Optional. The path to
- the Dockerfile relative to the root of the repo. If set, it will be
- used to build this component. Otherwise, App Platform will attempt to
- build it using buildpacks.
- "environment_slug": "str", # Optional. An
- environment slug describing the type of this app. For a full list,
- please refer to `the product documentation
- `_.
- "envs": [
- {
- "key": "str", # The variable name.
- Required.
- "scope": "RUN_AND_BUILD_TIME", #
- Optional. Default value is "RUN_AND_BUILD_TIME". * RUN_TIME:
- Made available only at run-time * BUILD_TIME: Made available
- only at build-time * RUN_AND_BUILD_TIME: Made available at
- both build and run-time. Known values are: "UNSET",
- "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME".
- "type": "GENERAL", # Optional.
- Default value is "GENERAL". * GENERAL: A plain-text
- environment variable * SECRET: A secret encrypted environment
- variable. Known values are: "GENERAL" and "SECRET".
- "value": "str" # Optional. The
- value. If the type is ``SECRET``"" , the value will be
- encrypted on first submission. On following submissions, the
- encrypted value should be used.
- }
- ],
- "git": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "repo_clone_url": "str" # Optional. The
- clone URL of the repo. Example:
- ``https://github.com/digitalocean/sample-golang.git``.
- },
- "github": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "deploy_on_push": bool, # Optional. Whether
- to automatically deploy new commits made to the repo.
- "repo": "str" # Optional. The name of the
- repo in the format owner/repo. Example:
- ``digitalocean/sample-golang``.
- },
- "gitlab": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "deploy_on_push": bool, # Optional. Whether
- to automatically deploy new commits made to the repo.
- "repo": "str" # Optional. The name of the
- repo in the format owner/repo. Example:
- ``digitalocean/sample-golang``.
- },
- "health_check": {
- "failure_threshold": 0, # Optional. The
- number of failed health checks before considered unhealthy.
- "http_path": "str", # Optional. The route
- path used for the HTTP health check ping. If not set, the HTTP
- health check will be disabled and a TCP health check used
- instead.
- "initial_delay_seconds": 0, # Optional. The
- number of seconds to wait before beginning health checks.
- "period_seconds": 0, # Optional. The number
- of seconds to wait between health checks.
- "port": 0, # Optional. The port on which the
- health check will be performed. If not set, the health check will
- be performed on the component's http_port.
- "success_threshold": 0, # Optional. The
- number of successful health checks before considered healthy.
- "timeout_seconds": 0 # Optional. The number
- of seconds after which the check times out.
- },
- "http_port": 0, # Optional. The internal port on
- which this service's run command will listen. Default: 8080 If there
- is not an environment variable with the name ``PORT``"" , one will be
- automatically added with its value set to the value of this field.
- "image": {
- "deploy_on_push": {
- "enabled": bool # Optional. Whether
- to automatically deploy new images. Can only be used for
- images hosted in DOCR and can only be used with an image tag,
- not a specific digest.
- },
- "digest": "str", # Optional. The image
- digest. Cannot be specified if tag is provided.
- "registry": "str", # Optional. The registry
- name. Must be left empty for the ``DOCR`` registry type.
- "registry_credentials": "str", # Optional.
- The credentials to be able to pull the image. The value will be
- encrypted on first submission. On following submissions, the
- encrypted value should be used. * "$username:$access_token" for
- registries of type ``DOCKER_HUB``. * "$username:$access_token"
- for registries of type ``GHCR``.
- "registry_type": "str", # Optional. *
- DOCKER_HUB: The DockerHub container registry type. * DOCR: The
- DigitalOcean container registry type. * GHCR: The Github
- container registry type. Known values are: "DOCKER_HUB", "DOCR",
- and "GHCR".
- "repository": "str", # Optional. The
- repository name.
- "tag": "latest" # Optional. Default value is
- "latest". The repository tag. Defaults to ``latest`` if not
- provided and no digest is provided. Cannot be specified if digest
- is provided.
- },
- "instance_count": 1, # Optional. Default value is 1.
- The amount of instances that this component should be scaled to.
- Default: 1. Must not be set if autoscaling is used.
- "instance_size_slug": {},
- "internal_ports": [
- 0 # Optional. The ports on which this
- service will listen for internal traffic.
- ],
- "liveness_health_check": {
- "failure_threshold": 0, # Optional. The
- number of failed health checks before considered unhealthy.
- "http_path": "str", # Optional. The route
- path used for the HTTP health check ping. If not set, the HTTP
- health check will be disabled and a TCP health check used
- instead.
- "initial_delay_seconds": 0, # Optional. The
- number of seconds to wait before beginning health checks.
- "period_seconds": 0, # Optional. The number
- of seconds to wait between health checks.
- "port": 0, # Optional. The port on which the
- health check will be performed.
- "success_threshold": 0, # Optional. The
- number of successful health checks before considered healthy.
- "timeout_seconds": 0 # Optional. The number
- of seconds after which the check times out.
- },
- "log_destinations": [
- {
- "name": "str", # Required.
- "datadog": {
- "api_key": "str", # Datadog
- API key. Required.
- "endpoint": "str" #
- Optional. Datadog HTTP log intake endpoint.
+ "bitbucket": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "deploy_on_push": bool, #
+ Optional. Whether to automatically deploy new commits
+ made to the repo.
+ "repo": "str" # Optional.
+ The name of the repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
},
- "logtail": {
- "token": "str" # Optional.
- Logtail token.
+ "build_command": "str", # Optional.
+ An optional build command to run while building this
+ component from source.
+ "dockerfile_path": "str", #
+ Optional. The path to the Dockerfile relative to the root of
+ the repo. If set, it will be used to build this component.
+ Otherwise, App Platform will attempt to build it using
+ buildpacks.
+ "environment_slug": "str", #
+ Optional. An environment slug describing the type of this
+ app. For a full list, please refer to `the product
+ documentation
+ `_.
+ "envs": [
+ {
+ "key": "str", # The
+ variable name. Required.
+ "scope":
+ "RUN_AND_BUILD_TIME", # Optional. Default value is
+ "RUN_AND_BUILD_TIME". * RUN_TIME: Made available only
+ at run-time * BUILD_TIME: Made available only at
+ build-time * RUN_AND_BUILD_TIME: Made available at
+ both build and run-time. Known values are: "UNSET",
+ "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME".
+ "type": "GENERAL", #
+ Optional. Default value is "GENERAL". * GENERAL: A
+ plain-text environment variable * SECRET: A secret
+ encrypted environment variable. Known values are:
+ "GENERAL" and "SECRET".
+ "value": "str" #
+                                    Optional. The value. If the type is ``SECRET``,
+ the value will be encrypted on first submission. On
+ following submissions, the encrypted value should be
+ used.
+ }
+ ],
+ "git": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "repo_clone_url": "str" #
+ Optional. The clone URL of the repo. Example:
+ ``https://github.com/digitalocean/sample-golang.git``.
},
- "open_search": {
- "basic_auth": {
- "password": "str", #
- Optional. Password for user defined in User. Is
- required when ``endpoint`` is set. Cannot be set if
- using a DigitalOcean DBaaS OpenSearch cluster.
- "user": "str" #
- Optional. Username to authenticate with. Only
- required when ``endpoint`` is set. Defaults to
- ``doadmin`` when ``cluster_name`` is set.
+ "github": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "deploy_on_push": bool, #
+ Optional. Whether to automatically deploy new commits
+ made to the repo.
+ "repo": "str" # Optional.
+ The name of the repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
+ },
+ "gitlab": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "deploy_on_push": bool, #
+ Optional. Whether to automatically deploy new commits
+ made to the repo.
+ "repo": "str" # Optional.
+ The name of the repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
+ },
+ "image": {
+ "deploy_on_push": {
+ "enabled": bool #
+ Optional. Whether to automatically deploy new images.
+ Can only be used for images hosted in DOCR and can
+ only be used with an image tag, not a specific
+ digest.
},
- "cluster_name": "str", #
- Optional. The name of a DigitalOcean DBaaS OpenSearch
- cluster to use as a log forwarding destination. Cannot be
- specified if ``endpoint`` is also specified.
- "endpoint": "str", #
- Optional. OpenSearch API Endpoint. Only HTTPS is
- supported. Format: https://:code:``::code:``.
- Cannot be specified if ``cluster_name`` is also
- specified.
- "index_name": "logs" #
- Optional. Default value is "logs". The index name to use
- for the logs. If not set, the default index name is
- "logs".
+ "digest": "str", # Optional.
+ The image digest. Cannot be specified if tag is provided.
+ "registry": "str", #
+ Optional. The registry name. Must be left empty for the
+ ``DOCR`` registry type.
+ "registry_credentials":
+ "str", # Optional. The credentials to be able to pull
+ the image. The value will be encrypted on first
+ submission. On following submissions, the encrypted value
+ should be used. * "$username:$access_token" for
+ registries of type ``DOCKER_HUB``. *
+ "$username:$access_token" for registries of type
+ ``GHCR``.
+ "registry_type": "str", #
+ Optional. * DOCKER_HUB: The DockerHub container registry
+ type. * DOCR: The DigitalOcean container registry type. *
+ GHCR: The Github container registry type. Known values
+ are: "DOCKER_HUB", "DOCR", and "GHCR".
+ "repository": "str", #
+ Optional. The repository name.
+ "tag": "latest" # Optional.
+ Default value is "latest". The repository tag. Defaults
+ to ``latest`` if not provided and no digest is provided.
+ Cannot be specified if digest is provided.
},
- "papertrail": {
- "endpoint": "str" #
- Papertrail syslog endpoint. Required.
- }
- }
- ],
- "name": "str", # Optional. The name. Must be unique
- across all components within the same app.
- "protocol": "str", # Optional. The protocol which
- the service uses to serve traffic on the http_port. * ``HTTP``"" :
- The app is serving the HTTP protocol. Default. * ``HTTP2``"" : The
- app is serving the HTTP/2 protocol. Currently, this needs to be
- implemented in the service by serving HTTP/2 cleartext (h2c). Known
- values are: "HTTP" and "HTTP2".
- "routes": [
- {
- "path": "str", # Optional.
- (Deprecated - Use Ingress Rules instead). An HTTP path
- prefix. Paths must start with / and must be unique across all
- components within an app.
- "preserve_path_prefix": bool #
- Optional. An optional flag to preserve the path that is
- forwarded to the backend service. By default, the HTTP
- request path will be trimmed from the left when forwarded to
- the component. For example, a component with ``path=/api``
- will have requests to ``/api/list`` trimmed to ``/list``. If
- this value is ``true``"" , the path will remain
- ``/api/list``.
- }
- ],
- "run_command": "str", # Optional. An optional run
- command to override the component's default.
- "source_dir": "str", # Optional. An optional path to
- the working directory to use for the build. For Dockerfile builds,
- this will be used as the build context. Must be relative to the root
- of the repo.
- "termination": {
- "drain_seconds": 0, # Optional. The number
- of seconds to wait between selecting a container instance for
- termination and issuing the TERM signal. Selecting a container
- instance for termination begins an asynchronous drain of new
- requests on upstream load-balancers. (Default 15).
- "grace_period_seconds": 0 # Optional. The
- number of seconds to wait between sending a TERM signal to a
- container and issuing a KILL which causes immediate shutdown.
- (Default 120).
- }
- }
- ],
- "static_sites": [
- {
- "bitbucket": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "deploy_on_push": bool, # Optional. Whether
- to automatically deploy new commits made to the repo.
- "repo": "str" # Optional. The name of the
- repo in the format owner/repo. Example:
- ``digitalocean/sample-golang``.
- },
- "build_command": "str", # Optional. An optional
- build command to run while building this component from source.
- "catchall_document": "str", # Optional. The name of
- the document to use as the fallback for any requests to documents
- that are not found when serving this static site. Only 1 of
- ``catchall_document`` or ``error_document`` can be set.
- "cors": {
- "allow_credentials": bool, # Optional.
- Whether browsers should expose the response to the client-side
- JavaScript code when the request"u2019s credentials mode is
- include. This configures the ``Access-Control-Allow-Credentials``
- header.
- "allow_headers": [
- "str" # Optional. The set of allowed
- HTTP request headers. This configures the
- ``Access-Control-Allow-Headers`` header.
- ],
- "allow_methods": [
- "str" # Optional. The set of allowed
- HTTP methods. This configures the
- ``Access-Control-Allow-Methods`` header.
- ],
- "allow_origins": [
- {
- "exact": "str", # Optional.
- Exact string match. Only 1 of ``exact``"" , ``prefix``""
- , or ``regex`` must be set.
- "prefix": "str", # Optional.
- Prefix-based match. Only 1 of ``exact``"" , ``prefix``""
- , or ``regex`` must be set.
- "regex": "str" # Optional.
- RE2 style regex-based match. Only 1 of ``exact``"" ,
- ``prefix``"" , or ``regex`` must be set. For more
- information about RE2 syntax, see:
- https://github.com/google/re2/wiki/Syntax.
+ "instance_count": 1, # Optional.
+ Default value is 1. The amount of instances that this
+ component should be scaled to. Default: 1. Must not be set if
+ autoscaling is used.
+ "instance_size_slug": {},
+ "kind": "UNSPECIFIED", # Optional.
+ Default value is "UNSPECIFIED". * UNSPECIFIED: Default job
+ type, will auto-complete to POST_DEPLOY kind. * PRE_DEPLOY:
+ Indicates a job that runs before an app deployment. *
+ POST_DEPLOY: Indicates a job that runs after an app
+ deployment. * FAILED_DEPLOY: Indicates a job that runs after
+ a component fails to deploy. Known values are: "UNSPECIFIED",
+ "PRE_DEPLOY", "POST_DEPLOY", and "FAILED_DEPLOY".
+ "log_destinations": [
+ {
+ "name": "str", #
+ Required.
+ "datadog": {
+ "api_key":
+ "str", # Datadog API key. Required.
+ "endpoint":
+ "str" # Optional. Datadog HTTP log intake
+ endpoint.
+ },
+ "logtail": {
+ "token":
+ "str" # Optional. Logtail token.
+ },
+ "open_search": {
+ "basic_auth":
+ {
+ "password": "str", # Optional. Password for
+ user defined in User. Is required when
+ ``endpoint`` is set. Cannot be set if using a
+ DigitalOcean DBaaS OpenSearch cluster.
+ "user": "str" # Optional. Username to
+ authenticate with. Only required when
+ ``endpoint`` is set. Defaults to ``doadmin``
+ when ``cluster_name`` is set.
+ },
+ "cluster_name": "str", # Optional. The name of a
+ DigitalOcean DBaaS OpenSearch cluster to use as a
+ log forwarding destination. Cannot be specified
+ if ``endpoint`` is also specified.
+ "endpoint":
+ "str", # Optional. OpenSearch API Endpoint. Only
+ HTTPS is supported. Format:
+                                            https://<host>:<port>. Cannot be
+ specified if ``cluster_name`` is also specified.
+ "index_name":
+ "logs" # Optional. Default value is "logs". The
+ index name to use for the logs. If not set, the
+ default index name is "logs".
+ },
+ "papertrail": {
+ "endpoint":
+ "str" # Papertrail syslog endpoint. Required.
+ }
+ }
+ ],
+ "name": "str", # Optional. The name.
+ Must be unique across all components within the same app.
+ "run_command": "str", # Optional. An
+ optional run command to override the component's default.
+ "source_dir": "str", # Optional. An
+ optional path to the working directory to use for the build.
+ For Dockerfile builds, this will be used as the build
+ context. Must be relative to the root of the repo.
+ "termination": {
+ "grace_period_seconds": 0 #
+ Optional. The number of seconds to wait between sending a
+ TERM signal to a container and issuing a KILL which
+ causes immediate shutdown. (Default 120).
}
- ],
- "expose_headers": [
- "str" # Optional. The set of HTTP
- response headers that browsers are allowed to access. This
- configures the ``Access-Control-Expose-Headers`` header.
- ],
- "max_age": "str" # Optional. An optional
- duration specifying how long browsers can cache the results of a
- preflight request. This configures the ``Access-Control-Max-Age``
- header.
- },
- "dockerfile_path": "str", # Optional. The path to
- the Dockerfile relative to the root of the repo. If set, it will be
- used to build this component. Otherwise, App Platform will attempt to
- build it using buildpacks.
- "environment_slug": "str", # Optional. An
- environment slug describing the type of this app. For a full list,
- please refer to `the product documentation
- `_.
- "envs": [
- {
- "key": "str", # The variable name.
- Required.
- "scope": "RUN_AND_BUILD_TIME", #
- Optional. Default value is "RUN_AND_BUILD_TIME". * RUN_TIME:
- Made available only at run-time * BUILD_TIME: Made available
- only at build-time * RUN_AND_BUILD_TIME: Made available at
- both build and run-time. Known values are: "UNSET",
- "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME".
- "type": "GENERAL", # Optional.
- Default value is "GENERAL". * GENERAL: A plain-text
- environment variable * SECRET: A secret encrypted environment
- variable. Known values are: "GENERAL" and "SECRET".
- "value": "str" # Optional. The
- value. If the type is ``SECRET``"" , the value will be
- encrypted on first submission. On following submissions, the
- encrypted value should be used.
}
],
- "error_document": "404.html", # Optional. Default
- value is "404.html". The name of the error document to use when
- serving this static site. Default: 404.html. If no such file exists
- within the built assets, App Platform will supply one.
- "git": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "repo_clone_url": "str" # Optional. The
- clone URL of the repo. Example:
- ``https://github.com/digitalocean/sample-golang.git``.
- },
- "github": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "deploy_on_push": bool, # Optional. Whether
- to automatically deploy new commits made to the repo.
- "repo": "str" # Optional. The name of the
- repo in the format owner/repo. Example:
- ``digitalocean/sample-golang``.
- },
- "gitlab": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "deploy_on_push": bool, # Optional. Whether
- to automatically deploy new commits made to the repo.
- "repo": "str" # Optional. The name of the
- repo in the format owner/repo. Example:
- ``digitalocean/sample-golang``.
- },
- "image": {
- "deploy_on_push": {
- "enabled": bool # Optional. Whether
- to automatically deploy new images. Can only be used for
- images hosted in DOCR and can only be used with an image tag,
- not a specific digest.
- },
- "digest": "str", # Optional. The image
- digest. Cannot be specified if tag is provided.
- "registry": "str", # Optional. The registry
- name. Must be left empty for the ``DOCR`` registry type.
- "registry_credentials": "str", # Optional.
- The credentials to be able to pull the image. The value will be
- encrypted on first submission. On following submissions, the
- encrypted value should be used. * "$username:$access_token" for
- registries of type ``DOCKER_HUB``. * "$username:$access_token"
- for registries of type ``GHCR``.
- "registry_type": "str", # Optional. *
- DOCKER_HUB: The DockerHub container registry type. * DOCR: The
- DigitalOcean container registry type. * GHCR: The Github
- container registry type. Known values are: "DOCKER_HUB", "DOCR",
- and "GHCR".
- "repository": "str", # Optional. The
- repository name.
- "tag": "latest" # Optional. Default value is
- "latest". The repository tag. Defaults to ``latest`` if not
- provided and no digest is provided. Cannot be specified if digest
- is provided.
+ "maintenance": {
+ "archive": bool, # Optional. Indicates
+ whether the app should be archived. Setting this to true implies
+ that enabled is set to true.
+ "enabled": bool, # Optional. Indicates
+ whether maintenance mode should be enabled for the app.
+ "offline_page_url": "str" # Optional. A
+ custom offline page to display when maintenance mode is enabled
+ or the app is archived.
},
- "index_document": "index.html", # Optional. Default
- value is "index.html". The name of the index document to use when
- serving this static site. Default: index.html.
- "log_destinations": [
+ "region": "str", # Optional. The slug form of the
+ geographical origin of the app. Default: ``nearest available``. Known
+ values are: "atl", "nyc", "sfo", "tor", "ams", "fra", "lon", "blr",
+ "sgp", and "syd".
+ "services": [
{
- "name": "str", # Required.
- "datadog": {
- "api_key": "str", # Datadog
- API key. Required.
- "endpoint": "str" #
- Optional. Datadog HTTP log intake endpoint.
+ "autoscaling": {
+ "max_instance_count": 0, #
+ Optional. The maximum amount of instances for this
+ component. Must be more than min_instance_count.
+ "metrics": {
+ "cpu": {
+ "percent": 80
+ # Optional. Default value is 80. The average
+ target CPU utilization for the component.
+ }
+ },
+ "min_instance_count": 0 #
+ Optional. The minimum amount of instances for this
+ component. Must be less than max_instance_count.
},
- "logtail": {
- "token": "str" # Optional.
- Logtail token.
+ "bitbucket": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "deploy_on_push": bool, #
+ Optional. Whether to automatically deploy new commits
+ made to the repo.
+ "repo": "str" # Optional.
+ The name of the repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
},
- "open_search": {
- "basic_auth": {
- "password": "str", #
- Optional. Password for user defined in User. Is
- required when ``endpoint`` is set. Cannot be set if
- using a DigitalOcean DBaaS OpenSearch cluster.
- "user": "str" #
- Optional. Username to authenticate with. Only
- required when ``endpoint`` is set. Defaults to
- ``doadmin`` when ``cluster_name`` is set.
+ "build_command": "str", # Optional.
+ An optional build command to run while building this
+ component from source.
+ "cors": {
+ "allow_credentials": bool, #
+ Optional. Whether browsers should expose the response to
+                            the client-side JavaScript code when the request's
+ credentials mode is include. This configures the
+ ``Access-Control-Allow-Credentials`` header.
+ "allow_headers": [
+ "str" # Optional.
+ The set of allowed HTTP request headers. This
+ configures the ``Access-Control-Allow-Headers``
+ header.
+ ],
+ "allow_methods": [
+ "str" # Optional.
+ The set of allowed HTTP methods. This configures the
+ ``Access-Control-Allow-Methods`` header.
+ ],
+ "allow_origins": [
+ {
+ "exact":
+ "str", # Optional. Exact string match. Only 1 of
+                                        ``exact``, ``prefix``, or ``regex`` must be
+ set.
+ "prefix":
+ "str", # Optional. Prefix-based match. Only 1 of
+                                        ``exact``, ``prefix``, or ``regex`` must be
+ set.
+ "regex":
+ "str" # Optional. RE2 style regex-based match.
+                                        Only 1 of ``exact``, ``prefix``, or
+ ``regex`` must be set. For more information about
+ RE2 syntax, see:
+ https://github.com/google/re2/wiki/Syntax.
+ }
+ ],
+ "expose_headers": [
+ "str" # Optional.
+ The set of HTTP response headers that browsers are
+ allowed to access. This configures the
+ ``Access-Control-Expose-Headers`` header.
+ ],
+ "max_age": "str" # Optional.
+ An optional duration specifying how long browsers can
+ cache the results of a preflight request. This configures
+ the ``Access-Control-Max-Age`` header.
+ },
+ "dockerfile_path": "str", #
+ Optional. The path to the Dockerfile relative to the root of
+ the repo. If set, it will be used to build this component.
+ Otherwise, App Platform will attempt to build it using
+ buildpacks.
+ "environment_slug": "str", #
+ Optional. An environment slug describing the type of this
+ app. For a full list, please refer to `the product
+ documentation
+ `_.
+ "envs": [
+ {
+ "key": "str", # The
+ variable name. Required.
+ "scope":
+ "RUN_AND_BUILD_TIME", # Optional. Default value is
+ "RUN_AND_BUILD_TIME". * RUN_TIME: Made available only
+ at run-time * BUILD_TIME: Made available only at
+ build-time * RUN_AND_BUILD_TIME: Made available at
+ both build and run-time. Known values are: "UNSET",
+ "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME".
+ "type": "GENERAL", #
+ Optional. Default value is "GENERAL". * GENERAL: A
+ plain-text environment variable * SECRET: A secret
+ encrypted environment variable. Known values are:
+ "GENERAL" and "SECRET".
+ "value": "str" #
+                                    Optional. The value. If the type is ``SECRET``,
+ the value will be encrypted on first submission. On
+ following submissions, the encrypted value should be
+ used.
+ }
+ ],
+ "git": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "repo_clone_url": "str" #
+ Optional. The clone URL of the repo. Example:
+ ``https://github.com/digitalocean/sample-golang.git``.
+ },
+ "github": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "deploy_on_push": bool, #
+ Optional. Whether to automatically deploy new commits
+ made to the repo.
+ "repo": "str" # Optional.
+ The name of the repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
+ },
+ "gitlab": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "deploy_on_push": bool, #
+ Optional. Whether to automatically deploy new commits
+ made to the repo.
+ "repo": "str" # Optional.
+ The name of the repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
+ },
+ "health_check": {
+ "failure_threshold": 0, #
+ Optional. The number of failed health checks before
+ considered unhealthy.
+ "http_path": "str", #
+ Optional. The route path used for the HTTP health check
+ ping. If not set, the HTTP health check will be disabled
+ and a TCP health check used instead.
+ "initial_delay_seconds": 0,
+ # Optional. The number of seconds to wait before
+ beginning health checks.
+ "period_seconds": 0, #
+ Optional. The number of seconds to wait between health
+ checks.
+ "port": 0, # Optional. The
+ port on which the health check will be performed. If not
+ set, the health check will be performed on the
+ component's http_port.
+ "success_threshold": 0, #
+ Optional. The number of successful health checks before
+ considered healthy.
+ "timeout_seconds": 0 #
+ Optional. The number of seconds after which the check
+ times out.
+ },
+ "http_port": 0, # Optional. The
+ internal port on which this service's run command will
+ listen. Default: 8080 If there is not an environment variable
+                        with the name ``PORT``, one will be automatically added
+ with its value set to the value of this field.
+ "image": {
+ "deploy_on_push": {
+ "enabled": bool #
+ Optional. Whether to automatically deploy new images.
+ Can only be used for images hosted in DOCR and can
+ only be used with an image tag, not a specific
+ digest.
},
- "cluster_name": "str", #
- Optional. The name of a DigitalOcean DBaaS OpenSearch
- cluster to use as a log forwarding destination. Cannot be
- specified if ``endpoint`` is also specified.
- "endpoint": "str", #
- Optional. OpenSearch API Endpoint. Only HTTPS is
- supported. Format: https://:code:``::code:``.
- Cannot be specified if ``cluster_name`` is also
- specified.
- "index_name": "logs" #
- Optional. Default value is "logs". The index name to use
- for the logs. If not set, the default index name is
- "logs".
+ "digest": "str", # Optional.
+ The image digest. Cannot be specified if tag is provided.
+ "registry": "str", #
+ Optional. The registry name. Must be left empty for the
+ ``DOCR`` registry type.
+ "registry_credentials":
+ "str", # Optional. The credentials to be able to pull
+ the image. The value will be encrypted on first
+ submission. On following submissions, the encrypted value
+ should be used. * "$username:$access_token" for
+ registries of type ``DOCKER_HUB``. *
+ "$username:$access_token" for registries of type
+ ``GHCR``.
+ "registry_type": "str", #
+ Optional. * DOCKER_HUB: The DockerHub container registry
+ type. * DOCR: The DigitalOcean container registry type. *
+ GHCR: The Github container registry type. Known values
+ are: "DOCKER_HUB", "DOCR", and "GHCR".
+ "repository": "str", #
+ Optional. The repository name.
+ "tag": "latest" # Optional.
+ Default value is "latest". The repository tag. Defaults
+ to ``latest`` if not provided and no digest is provided.
+ Cannot be specified if digest is provided.
},
- "papertrail": {
- "endpoint": "str" #
- Papertrail syslog endpoint. Required.
+ "instance_count": 1, # Optional.
+ Default value is 1. The amount of instances that this
+ component should be scaled to. Default: 1. Must not be set if
+ autoscaling is used.
+ "instance_size_slug": {},
+ "internal_ports": [
+ 0 # Optional. The ports on
+ which this service will listen for internal traffic.
+ ],
+ "liveness_health_check": {
+ "failure_threshold": 0, #
+ Optional. The number of failed health checks before
+ considered unhealthy.
+ "http_path": "str", #
+ Optional. The route path used for the HTTP health check
+ ping. If not set, the HTTP health check will be disabled
+ and a TCP health check used instead.
+ "initial_delay_seconds": 0,
+ # Optional. The number of seconds to wait before
+ beginning health checks.
+ "period_seconds": 0, #
+ Optional. The number of seconds to wait between health
+ checks.
+ "port": 0, # Optional. The
+ port on which the health check will be performed.
+ "success_threshold": 0, #
+ Optional. The number of successful health checks before
+ considered healthy.
+ "timeout_seconds": 0 #
+ Optional. The number of seconds after which the check
+ times out.
+ },
+ "log_destinations": [
+ {
+ "name": "str", #
+ Required.
+ "datadog": {
+ "api_key":
+ "str", # Datadog API key. Required.
+ "endpoint":
+ "str" # Optional. Datadog HTTP log intake
+ endpoint.
+ },
+ "logtail": {
+ "token":
+ "str" # Optional. Logtail token.
+ },
+ "open_search": {
+ "basic_auth":
+ {
+ "password": "str", # Optional. Password for
+ user defined in User. Is required when
+ ``endpoint`` is set. Cannot be set if using a
+ DigitalOcean DBaaS OpenSearch cluster.
+ "user": "str" # Optional. Username to
+ authenticate with. Only required when
+ ``endpoint`` is set. Defaults to ``doadmin``
+ when ``cluster_name`` is set.
+ },
+ "cluster_name": "str", # Optional. The name of a
+ DigitalOcean DBaaS OpenSearch cluster to use as a
+ log forwarding destination. Cannot be specified
+ if ``endpoint`` is also specified.
+ "endpoint":
+ "str", # Optional. OpenSearch API Endpoint. Only
+ HTTPS is supported. Format:
+                                            https://<host>:<port>. Cannot be
+ specified if ``cluster_name`` is also specified.
+ "index_name":
+ "logs" # Optional. Default value is "logs". The
+ index name to use for the logs. If not set, the
+ default index name is "logs".
+ },
+ "papertrail": {
+ "endpoint":
+ "str" # Papertrail syslog endpoint. Required.
+ }
+ }
+ ],
+ "name": "str", # Optional. The name.
+ Must be unique across all components within the same app.
+ "protocol": "str", # Optional. The
+ protocol which the service uses to serve traffic on the
+                        http_port. * ``HTTP``: The app is serving the HTTP
+                        protocol. Default. * ``HTTP2``: The app is serving the
+ HTTP/2 protocol. Currently, this needs to be implemented in
+ the service by serving HTTP/2 cleartext (h2c). Known values
+ are: "HTTP" and "HTTP2".
+ "routes": [
+ {
+ "path": "str", #
+ Optional. (Deprecated - Use Ingress Rules instead).
+ An HTTP path prefix. Paths must start with / and must
+ be unique across all components within an app.
+ "preserve_path_prefix": bool # Optional. An optional
+ flag to preserve the path that is forwarded to the
+ backend service. By default, the HTTP request path
+ will be trimmed from the left when forwarded to the
+ component. For example, a component with
+ ``path=/api`` will have requests to ``/api/list``
+ trimmed to ``/list``. If this value is ``true``"" ,
+ the path will remain ``/api/list``.
+ }
+ ],
+ "run_command": "str", # Optional. An
+ optional run command to override the component's default.
+ "source_dir": "str", # Optional. An
+ optional path to the working directory to use for the build.
+ For Dockerfile builds, this will be used as the build
+ context. Must be relative to the root of the repo.
+ "termination": {
+ "drain_seconds": 0, #
+ Optional. The number of seconds to wait between selecting
+ a container instance for termination and issuing the TERM
+ signal. Selecting a container instance for termination
+ begins an asynchronous drain of new requests on upstream
+ load-balancers. (Default 15).
+ "grace_period_seconds": 0 #
+ Optional. The number of seconds to wait between sending a
+ TERM signal to a container and issuing a KILL which
+ causes immediate shutdown. (Default 120).
}
}
],
- "name": "str", # Optional. The name. Must be unique
- across all components within the same app.
- "output_dir": "str", # Optional. An optional path to
- where the built assets will be located, relative to the build
- context. If not set, App Platform will automatically scan for these
- directory names: ``_static``"" , ``dist``"" , ``public``"" ,
- ``build``.
- "routes": [
+ "static_sites": [
{
- "path": "str", # Optional.
- (Deprecated - Use Ingress Rules instead). An HTTP path
- prefix. Paths must start with / and must be unique across all
- components within an app.
- "preserve_path_prefix": bool #
- Optional. An optional flag to preserve the path that is
- forwarded to the backend service. By default, the HTTP
- request path will be trimmed from the left when forwarded to
- the component. For example, a component with ``path=/api``
- will have requests to ``/api/list`` trimmed to ``/list``. If
- this value is ``true``"" , the path will remain
- ``/api/list``.
- }
- ],
- "run_command": "str", # Optional. An optional run
- command to override the component's default.
- "source_dir": "str" # Optional. An optional path to
- the working directory to use for the build. For Dockerfile builds,
- this will be used as the build context. Must be relative to the root
- of the repo.
- }
- ],
- "vpc": {
- "egress_ips": [
- {
- "ip": "str" # Optional. The egress ips
- associated with the VPC.
- }
- ],
- "id": "str" # Optional. The ID of the VPC.
- },
- "workers": [
- {
- "autoscaling": {
- "max_instance_count": 0, # Optional. The
- maximum amount of instances for this component. Must be more than
- min_instance_count.
- "metrics": {
- "cpu": {
- "percent": 80 # Optional.
- Default value is 80. The average target CPU utilization
- for the component.
- }
- },
- "min_instance_count": 0 # Optional. The
- minimum amount of instances for this component. Must be less than
- max_instance_count.
- },
- "bitbucket": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "deploy_on_push": bool, # Optional. Whether
- to automatically deploy new commits made to the repo.
- "repo": "str" # Optional. The name of the
- repo in the format owner/repo. Example:
- ``digitalocean/sample-golang``.
- },
- "build_command": "str", # Optional. An optional
- build command to run while building this component from source.
- "dockerfile_path": "str", # Optional. The path to
- the Dockerfile relative to the root of the repo. If set, it will be
- used to build this component. Otherwise, App Platform will attempt to
- build it using buildpacks.
- "environment_slug": "str", # Optional. An
- environment slug describing the type of this app. For a full list,
- please refer to `the product documentation
- `_.
- "envs": [
- {
- "key": "str", # The variable name.
- Required.
- "scope": "RUN_AND_BUILD_TIME", #
- Optional. Default value is "RUN_AND_BUILD_TIME". * RUN_TIME:
- Made available only at run-time * BUILD_TIME: Made available
- only at build-time * RUN_AND_BUILD_TIME: Made available at
- both build and run-time. Known values are: "UNSET",
- "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME".
- "type": "GENERAL", # Optional.
- Default value is "GENERAL". * GENERAL: A plain-text
- environment variable * SECRET: A secret encrypted environment
- variable. Known values are: "GENERAL" and "SECRET".
- "value": "str" # Optional. The
- value. If the type is ``SECRET``"" , the value will be
- encrypted on first submission. On following submissions, the
- encrypted value should be used.
+ "bitbucket": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "deploy_on_push": bool, #
+ Optional. Whether to automatically deploy new commits
+ made to the repo.
+ "repo": "str" # Optional.
+ The name of the repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
+ },
+ "build_command": "str", # Optional.
+ An optional build command to run while building this
+ component from source.
+ "catchall_document": "str", #
+ Optional. The name of the document to use as the fallback for
+ any requests to documents that are not found when serving
+ this static site. Only 1 of ``catchall_document`` or
+ ``error_document`` can be set.
+ "cors": {
+ "allow_credentials": bool, #
+ Optional. Whether browsers should expose the response to
+ the client-side JavaScript code when the request"u2019s
+ credentials mode is include. This configures the
+ ``Access-Control-Allow-Credentials`` header.
+ "allow_headers": [
+ "str" # Optional.
+ The set of allowed HTTP request headers. This
+ configures the ``Access-Control-Allow-Headers``
+ header.
+ ],
+ "allow_methods": [
+ "str" # Optional.
+ The set of allowed HTTP methods. This configures the
+ ``Access-Control-Allow-Methods`` header.
+ ],
+ "allow_origins": [
+ {
+ "exact":
+ "str", # Optional. Exact string match. Only 1 of
+ ``exact``"" , ``prefix``"" , or ``regex`` must be
+ set.
+ "prefix":
+ "str", # Optional. Prefix-based match. Only 1 of
+ ``exact``"" , ``prefix``"" , or ``regex`` must be
+ set.
+ "regex":
+ "str" # Optional. RE2 style regex-based match.
+ Only 1 of ``exact``"" , ``prefix``"" , or
+ ``regex`` must be set. For more information about
+ RE2 syntax, see:
+ https://github.com/google/re2/wiki/Syntax.
+ }
+ ],
+ "expose_headers": [
+ "str" # Optional.
+ The set of HTTP response headers that browsers are
+ allowed to access. This configures the
+ ``Access-Control-Expose-Headers`` header.
+ ],
+ "max_age": "str" # Optional.
+ An optional duration specifying how long browsers can
+ cache the results of a preflight request. This configures
+ the ``Access-Control-Max-Age`` header.
+ },
+ "dockerfile_path": "str", #
+ Optional. The path to the Dockerfile relative to the root of
+ the repo. If set, it will be used to build this component.
+ Otherwise, App Platform will attempt to build it using
+ buildpacks.
+ "environment_slug": "str", #
+ Optional. An environment slug describing the type of this
+ app. For a full list, please refer to `the product
+ documentation
+ `_.
+ "envs": [
+ {
+ "key": "str", # The
+ variable name. Required.
+ "scope":
+ "RUN_AND_BUILD_TIME", # Optional. Default value is
+ "RUN_AND_BUILD_TIME". * RUN_TIME: Made available only
+ at run-time * BUILD_TIME: Made available only at
+ build-time * RUN_AND_BUILD_TIME: Made available at
+ both build and run-time. Known values are: "UNSET",
+ "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME".
+ "type": "GENERAL", #
+ Optional. Default value is "GENERAL". * GENERAL: A
+ plain-text environment variable * SECRET: A secret
+ encrypted environment variable. Known values are:
+ "GENERAL" and "SECRET".
+ "value": "str" #
+ Optional. The value. If the type is ``SECRET``"" ,
+ the value will be encrypted on first submission. On
+ following submissions, the encrypted value should be
+ used.
+ }
+ ],
+ "error_document": "404.html", #
+ Optional. Default value is "404.html". The name of the error
+ document to use when serving this static site. Default:
+ 404.html. If no such file exists within the built assets, App
+ Platform will supply one.
+ "git": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "repo_clone_url": "str" #
+ Optional. The clone URL of the repo. Example:
+ ``https://github.com/digitalocean/sample-golang.git``.
+ },
+ "github": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "deploy_on_push": bool, #
+ Optional. Whether to automatically deploy new commits
+ made to the repo.
+ "repo": "str" # Optional.
+ The name of the repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
+ },
+ "gitlab": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "deploy_on_push": bool, #
+ Optional. Whether to automatically deploy new commits
+ made to the repo.
+ "repo": "str" # Optional.
+ The name of the repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
+ },
+ "image": {
+ "deploy_on_push": {
+ "enabled": bool #
+ Optional. Whether to automatically deploy new images.
+ Can only be used for images hosted in DOCR and can
+ only be used with an image tag, not a specific
+ digest.
+ },
+ "digest": "str", # Optional.
+ The image digest. Cannot be specified if tag is provided.
+ "registry": "str", #
+ Optional. The registry name. Must be left empty for the
+ ``DOCR`` registry type.
+ "registry_credentials":
+ "str", # Optional. The credentials to be able to pull
+ the image. The value will be encrypted on first
+ submission. On following submissions, the encrypted value
+ should be used. * "$username:$access_token" for
+ registries of type ``DOCKER_HUB``. *
+ "$username:$access_token" for registries of type
+ ``GHCR``.
+ "registry_type": "str", #
+ Optional. * DOCKER_HUB: The DockerHub container registry
+ type. * DOCR: The DigitalOcean container registry type. *
+ GHCR: The Github container registry type. Known values
+ are: "DOCKER_HUB", "DOCR", and "GHCR".
+ "repository": "str", #
+ Optional. The repository name.
+ "tag": "latest" # Optional.
+ Default value is "latest". The repository tag. Defaults
+ to ``latest`` if not provided and no digest is provided.
+ Cannot be specified if digest is provided.
+ },
+ "index_document": "index.html", #
+ Optional. Default value is "index.html". The name of the
+ index document to use when serving this static site. Default:
+ index.html.
+ "log_destinations": [
+ {
+ "name": "str", #
+ Required.
+ "datadog": {
+ "api_key":
+ "str", # Datadog API key. Required.
+ "endpoint":
+ "str" # Optional. Datadog HTTP log intake
+ endpoint.
+ },
+ "logtail": {
+ "token":
+ "str" # Optional. Logtail token.
+ },
+ "open_search": {
+ "basic_auth":
+ {
+ "password": "str", # Optional. Password for
+ user defined in User. Is required when
+ ``endpoint`` is set. Cannot be set if using a
+ DigitalOcean DBaaS OpenSearch cluster.
+ "user": "str" # Optional. Username to
+ authenticate with. Only required when
+ ``endpoint`` is set. Defaults to ``doadmin``
+ when ``cluster_name`` is set.
+ },
+ "cluster_name": "str", # Optional. The name of a
+ DigitalOcean DBaaS OpenSearch cluster to use as a
+ log forwarding destination. Cannot be specified
+ if ``endpoint`` is also specified.
+ "endpoint":
+ "str", # Optional. OpenSearch API Endpoint. Only
+ HTTPS is supported. Format:
+ https://:code:``::code:``. Cannot be
+ specified if ``cluster_name`` is also specified.
+ "index_name":
+ "logs" # Optional. Default value is "logs". The
+ index name to use for the logs. If not set, the
+ default index name is "logs".
+ },
+ "papertrail": {
+ "endpoint":
+ "str" # Papertrail syslog endpoint. Required.
+ }
+ }
+ ],
+ "name": "str", # Optional. The name.
+ Must be unique across all components within the same app.
+ "output_dir": "str", # Optional. An
+ optional path to where the built assets will be located,
+ relative to the build context. If not set, App Platform will
+ automatically scan for these directory names: ``_static``"" ,
+ ``dist``"" , ``public``"" , ``build``.
+ "routes": [
+ {
+ "path": "str", #
+ Optional. (Deprecated - Use Ingress Rules instead).
+ An HTTP path prefix. Paths must start with / and must
+ be unique across all components within an app.
+ "preserve_path_prefix": bool # Optional. An optional
+ flag to preserve the path that is forwarded to the
+ backend service. By default, the HTTP request path
+ will be trimmed from the left when forwarded to the
+ component. For example, a component with
+ ``path=/api`` will have requests to ``/api/list``
+ trimmed to ``/list``. If this value is ``true``"" ,
+ the path will remain ``/api/list``.
+ }
+ ],
+ "run_command": "str", # Optional. An
+ optional run command to override the component's default.
+ "source_dir": "str" # Optional. An
+ optional path to the working directory to use for the build.
+ For Dockerfile builds, this will be used as the build
+ context. Must be relative to the root of the repo.
}
],
- "git": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "repo_clone_url": "str" # Optional. The
- clone URL of the repo. Example:
- ``https://github.com/digitalocean/sample-golang.git``.
- },
- "github": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "deploy_on_push": bool, # Optional. Whether
- to automatically deploy new commits made to the repo.
- "repo": "str" # Optional. The name of the
- repo in the format owner/repo. Example:
- ``digitalocean/sample-golang``.
- },
- "gitlab": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "deploy_on_push": bool, # Optional. Whether
- to automatically deploy new commits made to the repo.
- "repo": "str" # Optional. The name of the
- repo in the format owner/repo. Example:
- ``digitalocean/sample-golang``.
- },
- "image": {
- "deploy_on_push": {
- "enabled": bool # Optional. Whether
- to automatically deploy new images. Can only be used for
- images hosted in DOCR and can only be used with an image tag,
- not a specific digest.
- },
- "digest": "str", # Optional. The image
- digest. Cannot be specified if tag is provided.
- "registry": "str", # Optional. The registry
- name. Must be left empty for the ``DOCR`` registry type.
- "registry_credentials": "str", # Optional.
- The credentials to be able to pull the image. The value will be
- encrypted on first submission. On following submissions, the
- encrypted value should be used. * "$username:$access_token" for
- registries of type ``DOCKER_HUB``. * "$username:$access_token"
- for registries of type ``GHCR``.
- "registry_type": "str", # Optional. *
- DOCKER_HUB: The DockerHub container registry type. * DOCR: The
- DigitalOcean container registry type. * GHCR: The Github
- container registry type. Known values are: "DOCKER_HUB", "DOCR",
- and "GHCR".
- "repository": "str", # Optional. The
- repository name.
- "tag": "latest" # Optional. Default value is
- "latest". The repository tag. Defaults to ``latest`` if not
- provided and no digest is provided. Cannot be specified if digest
- is provided.
- },
- "instance_count": 1, # Optional. Default value is 1.
- The amount of instances that this component should be scaled to.
- Default: 1. Must not be set if autoscaling is used.
- "instance_size_slug": {},
- "liveness_health_check": {
- "failure_threshold": 0, # Optional. The
- number of failed health checks before considered unhealthy.
- "http_path": "str", # Optional. The route
- path used for the HTTP health check ping. If not set, the HTTP
- health check will be disabled and a TCP health check used
- instead.
- "initial_delay_seconds": 0, # Optional. The
- number of seconds to wait before beginning health checks.
- "period_seconds": 0, # Optional. The number
- of seconds to wait between health checks.
- "port": 0, # Optional. The port on which the
- health check will be performed.
- "success_threshold": 0, # Optional. The
- number of successful health checks before considered healthy.
- "timeout_seconds": 0 # Optional. The number
- of seconds after which the check times out.
+ "vpc": {
+ "egress_ips": [
+ {
+ "ip": "str" # Optional. The
+ egress ips associated with the VPC.
+ }
+ ],
+ "id": "str" # Optional. The ID of the VPC.
},
- "log_destinations": [
+ "workers": [
{
- "name": "str", # Required.
- "datadog": {
- "api_key": "str", # Datadog
- API key. Required.
- "endpoint": "str" #
- Optional. Datadog HTTP log intake endpoint.
+ "autoscaling": {
+ "max_instance_count": 0, #
+ Optional. The maximum amount of instances for this
+ component. Must be more than min_instance_count.
+ "metrics": {
+ "cpu": {
+ "percent": 80
+ # Optional. Default value is 80. The average
+ target CPU utilization for the component.
+ }
+ },
+ "min_instance_count": 0 #
+ Optional. The minimum amount of instances for this
+ component. Must be less than max_instance_count.
},
- "logtail": {
- "token": "str" # Optional.
- Logtail token.
+ "bitbucket": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "deploy_on_push": bool, #
+ Optional. Whether to automatically deploy new commits
+ made to the repo.
+ "repo": "str" # Optional.
+ The name of the repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
},
- "open_search": {
- "basic_auth": {
- "password": "str", #
- Optional. Password for user defined in User. Is
- required when ``endpoint`` is set. Cannot be set if
- using a DigitalOcean DBaaS OpenSearch cluster.
- "user": "str" #
- Optional. Username to authenticate with. Only
- required when ``endpoint`` is set. Defaults to
- ``doadmin`` when ``cluster_name`` is set.
+ "build_command": "str", # Optional.
+ An optional build command to run while building this
+ component from source.
+ "dockerfile_path": "str", #
+ Optional. The path to the Dockerfile relative to the root of
+ the repo. If set, it will be used to build this component.
+ Otherwise, App Platform will attempt to build it using
+ buildpacks.
+ "environment_slug": "str", #
+ Optional. An environment slug describing the type of this
+ app. For a full list, please refer to `the product
+ documentation
+ `_.
+ "envs": [
+ {
+ "key": "str", # The
+ variable name. Required.
+ "scope":
+ "RUN_AND_BUILD_TIME", # Optional. Default value is
+ "RUN_AND_BUILD_TIME". * RUN_TIME: Made available only
+ at run-time * BUILD_TIME: Made available only at
+ build-time * RUN_AND_BUILD_TIME: Made available at
+ both build and run-time. Known values are: "UNSET",
+ "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME".
+ "type": "GENERAL", #
+ Optional. Default value is "GENERAL". * GENERAL: A
+ plain-text environment variable * SECRET: A secret
+ encrypted environment variable. Known values are:
+ "GENERAL" and "SECRET".
+ "value": "str" #
+ Optional. The value. If the type is ``SECRET``"" ,
+ the value will be encrypted on first submission. On
+ following submissions, the encrypted value should be
+ used.
+ }
+ ],
+ "git": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "repo_clone_url": "str" #
+ Optional. The clone URL of the repo. Example:
+ ``https://github.com/digitalocean/sample-golang.git``.
+ },
+ "github": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "deploy_on_push": bool, #
+ Optional. Whether to automatically deploy new commits
+ made to the repo.
+ "repo": "str" # Optional.
+ The name of the repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
+ },
+ "gitlab": {
+ "branch": "str", # Optional.
+ The name of the branch to use.
+ "deploy_on_push": bool, #
+ Optional. Whether to automatically deploy new commits
+ made to the repo.
+ "repo": "str" # Optional.
+ The name of the repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
+ },
+ "image": {
+ "deploy_on_push": {
+ "enabled": bool #
+ Optional. Whether to automatically deploy new images.
+ Can only be used for images hosted in DOCR and can
+ only be used with an image tag, not a specific
+ digest.
},
- "cluster_name": "str", #
- Optional. The name of a DigitalOcean DBaaS OpenSearch
- cluster to use as a log forwarding destination. Cannot be
- specified if ``endpoint`` is also specified.
- "endpoint": "str", #
- Optional. OpenSearch API Endpoint. Only HTTPS is
- supported. Format: https://:code:``::code:``.
- Cannot be specified if ``cluster_name`` is also
- specified.
- "index_name": "logs" #
- Optional. Default value is "logs". The index name to use
- for the logs. If not set, the default index name is
- "logs".
+ "digest": "str", # Optional.
+ The image digest. Cannot be specified if tag is provided.
+ "registry": "str", #
+ Optional. The registry name. Must be left empty for the
+ ``DOCR`` registry type.
+ "registry_credentials":
+ "str", # Optional. The credentials to be able to pull
+ the image. The value will be encrypted on first
+ submission. On following submissions, the encrypted value
+ should be used. * "$username:$access_token" for
+ registries of type ``DOCKER_HUB``. *
+ "$username:$access_token" for registries of type
+ ``GHCR``.
+ "registry_type": "str", #
+ Optional. * DOCKER_HUB: The DockerHub container registry
+ type. * DOCR: The DigitalOcean container registry type. *
+ GHCR: The Github container registry type. Known values
+ are: "DOCKER_HUB", "DOCR", and "GHCR".
+ "repository": "str", #
+ Optional. The repository name.
+ "tag": "latest" # Optional.
+ Default value is "latest". The repository tag. Defaults
+ to ``latest`` if not provided and no digest is provided.
+ Cannot be specified if digest is provided.
},
- "papertrail": {
- "endpoint": "str" #
- Papertrail syslog endpoint. Required.
+ "instance_count": 1, # Optional.
+ Default value is 1. The amount of instances that this
+ component should be scaled to. Default: 1. Must not be set if
+ autoscaling is used.
+ "instance_size_slug": {},
+ "liveness_health_check": {
+ "failure_threshold": 0, #
+ Optional. The number of failed health checks before
+ considered unhealthy.
+ "http_path": "str", #
+ Optional. The route path used for the HTTP health check
+ ping. If not set, the HTTP health check will be disabled
+ and a TCP health check used instead.
+ "initial_delay_seconds": 0,
+ # Optional. The number of seconds to wait before
+ beginning health checks.
+ "period_seconds": 0, #
+ Optional. The number of seconds to wait between health
+ checks.
+ "port": 0, # Optional. The
+ port on which the health check will be performed.
+ "success_threshold": 0, #
+ Optional. The number of successful health checks before
+ considered healthy.
+ "timeout_seconds": 0 #
+ Optional. The number of seconds after which the check
+ times out.
+ },
+ "log_destinations": [
+ {
+ "name": "str", #
+ Required.
+ "datadog": {
+ "api_key":
+ "str", # Datadog API key. Required.
+ "endpoint":
+ "str" # Optional. Datadog HTTP log intake
+ endpoint.
+ },
+ "logtail": {
+ "token":
+ "str" # Optional. Logtail token.
+ },
+ "open_search": {
+ "basic_auth":
+ {
+ "password": "str", # Optional. Password for
+ user defined in User. Is required when
+ ``endpoint`` is set. Cannot be set if using a
+ DigitalOcean DBaaS OpenSearch cluster.
+ "user": "str" # Optional. Username to
+ authenticate with. Only required when
+ ``endpoint`` is set. Defaults to ``doadmin``
+ when ``cluster_name`` is set.
+ },
+ "cluster_name": "str", # Optional. The name of a
+ DigitalOcean DBaaS OpenSearch cluster to use as a
+ log forwarding destination. Cannot be specified
+ if ``endpoint`` is also specified.
+ "endpoint":
+ "str", # Optional. OpenSearch API Endpoint. Only
+ HTTPS is supported. Format:
+ https://:code:``::code:``. Cannot be
+ specified if ``cluster_name`` is also specified.
+ "index_name":
+ "logs" # Optional. Default value is "logs". The
+ index name to use for the logs. If not set, the
+ default index name is "logs".
+ },
+ "papertrail": {
+ "endpoint":
+ "str" # Papertrail syslog endpoint. Required.
+ }
+ }
+ ],
+ "name": "str", # Optional. The name.
+ Must be unique across all components within the same app.
+ "run_command": "str", # Optional. An
+ optional run command to override the component's default.
+ "source_dir": "str", # Optional. An
+ optional path to the working directory to use for the build.
+ For Dockerfile builds, this will be used as the build
+ context. Must be relative to the root of the repo.
+ "termination": {
+ "grace_period_seconds": 0 #
+ Optional. The number of seconds to wait between sending a
+ TERM signal to a container and issuing a KILL which
+ causes immediate shutdown. (Default 120).
}
}
- ],
- "name": "str", # Optional. The name. Must be unique
- across all components within the same app.
- "run_command": "str", # Optional. An optional run
- command to override the component's default.
- "source_dir": "str", # Optional. An optional path to
- the working directory to use for the build. For Dockerfile builds,
- this will be used as the build context. Must be relative to the root
- of the repo.
- "termination": {
- "grace_period_seconds": 0 # Optional. The
- number of seconds to wait between sending a TERM signal to a
- container and issuing a KILL which causes immediate shutdown.
- (Default 120).
+ ]
+ },
+ "static_sites": [
+ {
+ "name": "str", # Optional. The name of this
+ static site.
+ "source_commit_hash": "str" # Optional. The
+ commit hash of the repository that was used to build this static
+ site.
}
- }
- ]
+ ],
+ "tier_slug": "str", # Optional. The current pricing tier
+ slug of the deployment.
+ "updated_at": "2020-02-20 00:00:00", # Optional. When the
+ deployment was last updated.
+ "workers": [
+ {
+ "name": "str", # Optional. The name of this
+ worker.
+ "source_commit_hash": "str" # Optional. The
+ commit hash of the repository that was used to build this worker.
+ }
+ ]
+ },
+ "deployment_id": "str", # Optional. For deployment events, this is
+ the same as the deployment's ID. For autoscaling events, this is the
+ deployment that was autoscaled.
+ "id": "str", # Optional. The ID of the event (UUID).
+ "type": "str" # Optional. The type of event. Known values are:
+ "UNKNOWN", "DEPLOYMENT", and "AUTOSCALING".
}
}
+ # response body for status code(s): 404
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
"""
+ error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ 401: cast(
+ Type[HttpResponseError],
+ lambda response: ClientAuthenticationError(response=response),
+ ),
+ 429: HttpResponseError,
+ 500: HttpResponseError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = kwargs.pop("params", {}) or {}
+
+ cls: ClsType[JSON] = kwargs.pop("cls", None)
+
+ _request = build_apps_cancel_event_request(
+ app_id=app_id,
+ event_id=event_id,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = (
+ self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 404]:
+ if _stream:
+ response.read() # Load the body in memory and close the socket
+ map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
+ raise HttpResponseError(response=response)
+
+ response_headers = {}
+ if response.status_code == 200:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if response.status_code == 404:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if cls:
+ return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
+
+ return cast(JSON, deserialized) # type: ignore
@distributed_trace
- def validate_app_spec(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON:
+ def get_event_logs(
+ self,
+ app_id: str,
+ event_id: str,
+ *,
+ follow: Optional[bool] = None,
+ type: str = "UNSPECIFIED",
+ pod_connection_timeout: Optional[str] = None,
+ **kwargs: Any,
+ ) -> JSON:
# pylint: disable=line-too-long
- """Propose an App Spec.
+ """Retrieve Event Logs.
- To propose and validate a spec for a new or existing app, send a POST request to the
- ``/v2/apps/propose`` endpoint. The request returns some information about the proposed app,
- including app cost and upgrade cost. If an existing app ID is specified, the app spec is
- treated as a proposed update to the existing app.
+ Retrieve the logs of an autoscaling event for an app.
- :param body: Is either a JSON type or a IO[bytes] type. Required.
- :type body: JSON or IO[bytes]
+ :param app_id: The app ID. Required.
+ :type app_id: str
+ :param event_id: The event ID. Required.
+ :type event_id: str
+ :keyword follow: Whether the logs should follow live updates. Default value is None.
+ :paramtype follow: bool
+ :keyword type: The type of logs to retrieve
+
+
+ * BUILD: Build-time logs
+ * DEPLOY: Deploy-time logs
+ * RUN: Live run-time logs
+ * RUN_RESTARTED: Logs of crashed/restarted instances during runtime
+ * AUTOSCALE_EVENT: Logs of an autoscaling event (requires event_id). Known values are:
+ "UNSPECIFIED", "BUILD", "DEPLOY", "RUN", "RUN_RESTARTED", and "AUTOSCALE_EVENT". Default value
+ is "UNSPECIFIED".
+ :paramtype type: str
+ :keyword pod_connection_timeout: An optional time duration to wait if the underlying component
+ instance is not immediately available. Default: ``3m``. Default value is None.
+ :paramtype pod_connection_timeout: str
:return: JSON object
:rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
@@ -104213,247 +105967,720 @@ def validate_app_spec(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON
Example:
.. code-block:: python
- # JSON input template you can fill out and use as your body input.
- body = {
- "spec": {
- "name": "str", # The name of the app. Must be unique across all apps
- in the same account. Required.
- "databases": [
- {
- "name": "str", # The database's name. The name must
- be unique across all components within the same app and cannot use
- capital letters. Required.
- "cluster_name": "str", # Optional. The name of the
- underlying DigitalOcean DBaaS cluster. This is required for
- production databases. For dev databases, if cluster_name is not set,
- a new cluster will be provisioned.
- "db_name": "str", # Optional. The name of the MySQL
- or PostgreSQL database to configure.
- "db_user": "str", # Optional. The name of the MySQL
- or PostgreSQL user to configure.
- "engine": "UNSET", # Optional. Default value is
- "UNSET". * MYSQL: MySQL * PG: PostgreSQL * REDIS: Caching * MONGODB:
- MongoDB * KAFKA: Kafka * OPENSEARCH: OpenSearch * VALKEY: ValKey.
- Known values are: "UNSET", "MYSQL", "PG", "REDIS", "MONGODB",
- "KAFKA", "OPENSEARCH", and "VALKEY".
- "production": bool, # Optional. Whether this is a
- production or dev database.
- "version": "str" # Optional. The version of the
- database engine.
- }
- ],
- "disable_edge_cache": False, # Optional. Default value is False. ..
- role:: raw-html-m2r(raw) :format: html If set to ``true``"" , the app
- will **not** be cached at the edge (CDN). Enable this option if you want to
- manage CDN configuration yourself"u2014whether by using an external CDN
- provider or by handling static content and caching within your app. This
- setting is also recommended for apps that require real-time data or serve
- dynamic content, such as those using Server-Sent Events (SSE) over GET, or
- hosting an MCP (Model Context Protocol) Server that utilizes SSE.""
-            :raw-html-m2r:`<br>` **Note:** This feature is not available for static site
-            components.\\ :raw-html-m2r:`<br>` For more information, see `Disable CDN
- Cache
- `_.
- "disable_email_obfuscation": False, # Optional. Default value is
- False. If set to ``true``"" , email addresses in the app will not be
- obfuscated. This is useful for apps that require email addresses to be
- visible (in the HTML markup).
- "domains": [
- {
- "domain": "str", # The hostname for the domain.
- Required.
- "minimum_tls_version": "str", # Optional. The
- minimum version of TLS a client application can use to access
- resources for the domain. Must be one of the following values
- wrapped within quotations: ``"1.2"`` or ``"1.3"``. Known values are:
- "1.2" and "1.3".
- "type": "UNSPECIFIED", # Optional. Default value is
- "UNSPECIFIED". * DEFAULT: The default ``.ondigitalocean.app`` domain
- assigned to this app * PRIMARY: The primary domain for this app that
- is displayed as the default in the control panel, used in bindable
- environment variables, and any other places that reference an app's
- live URL. Only one domain may be set as primary. * ALIAS: A
- non-primary domain. Known values are: "UNSPECIFIED", "DEFAULT",
- "PRIMARY", and "ALIAS".
- "wildcard": bool, # Optional. Indicates whether the
- domain includes all sub-domains, in addition to the given domain.
- "zone": "str" # Optional. Optional. If the domain
- uses DigitalOcean DNS and you would like App Platform to
- automatically manage it for you, set this to the name of the domain
- on your account. For example, If the domain you are adding is
- ``app.domain.com``"" , the zone could be ``domain.com``.
- }
- ],
- "egress": {
- "type": "AUTOASSIGN" # Optional. Default value is
- "AUTOASSIGN". The app egress type. Known values are: "AUTOASSIGN" and
- "DEDICATED_IP".
- },
- "enhanced_threat_control_enabled": False, # Optional. Default value
- is False. If set to ``true``"" , suspicious requests will go through
- additional security checks to help mitigate layer 7 DDoS attacks.
- "functions": [
- {
- "name": "str", # The name. Must be unique across all
- components within the same app. Required.
- "alerts": [
- {
- "disabled": bool, # Optional. Is the
- alert disabled?.
- "operator": "UNSPECIFIED_OPERATOR",
- # Optional. Default value is "UNSPECIFIED_OPERATOR". Known
- values are: "UNSPECIFIED_OPERATOR", "GREATER_THAN", and
- "LESS_THAN".
- "rule": "UNSPECIFIED_RULE", #
- Optional. Default value is "UNSPECIFIED_RULE". Known values
- are: "UNSPECIFIED_RULE", "CPU_UTILIZATION",
- "MEM_UTILIZATION", "RESTART_COUNT", "DEPLOYMENT_FAILED",
- "DEPLOYMENT_LIVE", "DOMAIN_FAILED", "DOMAIN_LIVE",
- "AUTOSCALE_FAILED", "AUTOSCALE_SUCCEEDED",
- "FUNCTIONS_ACTIVATION_COUNT",
- "FUNCTIONS_AVERAGE_DURATION_MS",
- "FUNCTIONS_ERROR_RATE_PER_MINUTE",
- "FUNCTIONS_AVERAGE_WAIT_TIME_MS", "FUNCTIONS_ERROR_COUNT",
- and "FUNCTIONS_GB_RATE_PER_SECOND".
- "value": 0.0, # Optional. Threshold
- value for alert.
- "window": "UNSPECIFIED_WINDOW" #
- Optional. Default value is "UNSPECIFIED_WINDOW". Known values
- are: "UNSPECIFIED_WINDOW", "FIVE_MINUTES", "TEN_MINUTES",
- "THIRTY_MINUTES", and "ONE_HOUR".
- }
- ],
- "bitbucket": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "deploy_on_push": bool, # Optional. Whether
- to automatically deploy new commits made to the repo.
- "repo": "str" # Optional. The name of the
- repo in the format owner/repo. Example:
- ``digitalocean/sample-golang``.
- },
- "cors": {
- "allow_credentials": bool, # Optional.
- Whether browsers should expose the response to the client-side
- JavaScript code when the request"u2019s credentials mode is
- include. This configures the ``Access-Control-Allow-Credentials``
- header.
- "allow_headers": [
- "str" # Optional. The set of allowed
- HTTP request headers. This configures the
- ``Access-Control-Allow-Headers`` header.
- ],
- "allow_methods": [
- "str" # Optional. The set of allowed
- HTTP methods. This configures the
- ``Access-Control-Allow-Methods`` header.
- ],
- "allow_origins": [
- {
- "exact": "str", # Optional.
- Exact string match. Only 1 of ``exact``"" , ``prefix``""
- , or ``regex`` must be set.
- "prefix": "str", # Optional.
- Prefix-based match. Only 1 of ``exact``"" , ``prefix``""
- , or ``regex`` must be set.
- "regex": "str" # Optional.
- RE2 style regex-based match. Only 1 of ``exact``"" ,
- ``prefix``"" , or ``regex`` must be set. For more
- information about RE2 syntax, see:
- https://github.com/google/re2/wiki/Syntax.
- }
- ],
- "expose_headers": [
- "str" # Optional. The set of HTTP
- response headers that browsers are allowed to access. This
- configures the ``Access-Control-Expose-Headers`` header.
- ],
- "max_age": "str" # Optional. An optional
- duration specifying how long browsers can cache the results of a
- preflight request. This configures the ``Access-Control-Max-Age``
- header.
- },
- "envs": [
- {
- "key": "str", # The variable name.
- Required.
- "scope": "RUN_AND_BUILD_TIME", #
- Optional. Default value is "RUN_AND_BUILD_TIME". * RUN_TIME:
- Made available only at run-time * BUILD_TIME: Made available
- only at build-time * RUN_AND_BUILD_TIME: Made available at
- both build and run-time. Known values are: "UNSET",
- "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME".
- "type": "GENERAL", # Optional.
- Default value is "GENERAL". * GENERAL: A plain-text
- environment variable * SECRET: A secret encrypted environment
- variable. Known values are: "GENERAL" and "SECRET".
- "value": "str" # Optional. The
- value. If the type is ``SECRET``"" , the value will be
- encrypted on first submission. On following submissions, the
- encrypted value should be used.
- }
- ],
- "git": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "repo_clone_url": "str" # Optional. The
- clone URL of the repo. Example:
- ``https://github.com/digitalocean/sample-golang.git``.
- },
- "github": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "deploy_on_push": bool, # Optional. Whether
- to automatically deploy new commits made to the repo.
- "repo": "str" # Optional. The name of the
- repo in the format owner/repo. Example:
- ``digitalocean/sample-golang``.
- },
- "gitlab": {
- "branch": "str", # Optional. The name of the
- branch to use.
- "deploy_on_push": bool, # Optional. Whether
- to automatically deploy new commits made to the repo.
- "repo": "str" # Optional. The name of the
- repo in the format owner/repo. Example:
- ``digitalocean/sample-golang``.
- },
- "log_destinations": [
- {
- "name": "str", # Required.
- "datadog": {
- "api_key": "str", # Datadog
- API key. Required.
- "endpoint": "str" #
- Optional. Datadog HTTP log intake endpoint.
- },
- "logtail": {
- "token": "str" # Optional.
- Logtail token.
- },
- "open_search": {
- "basic_auth": {
- "password": "str", #
- Optional. Password for user defined in User. Is
- required when ``endpoint`` is set. Cannot be set if
- using a DigitalOcean DBaaS OpenSearch cluster.
- "user": "str" #
- Optional. Username to authenticate with. Only
- required when ``endpoint`` is set. Defaults to
- ``doadmin`` when ``cluster_name`` is set.
- },
- "cluster_name": "str", #
- Optional. The name of a DigitalOcean DBaaS OpenSearch
- cluster to use as a log forwarding destination. Cannot be
- specified if ``endpoint`` is also specified.
- "endpoint": "str", #
- Optional. OpenSearch API Endpoint. Only HTTPS is
- supported. Format: https://:code:``::code:``.
- Cannot be specified if ``cluster_name`` is also
- specified.
- "index_name": "logs" #
- Optional. Default value is "logs". The index name to use
- for the logs. If not set, the default index name is
+ # response body for status code(s): 200
+ response == {
+ "historic_urls": [
+ "str" # Optional. A list of URLs to archived log files.
+ ],
+ "live_url": "str" # Optional. A URL of the real-time live logs. This URL may
+ use either the ``https://`` or ``wss://`` protocols and will keep pushing live
+ logs as they become available.
+ }
+ # response body for status code(s): 404
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
+ """
+ error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ 401: cast(
+ Type[HttpResponseError],
+ lambda response: ClientAuthenticationError(response=response),
+ ),
+ 429: HttpResponseError,
+ 500: HttpResponseError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = kwargs.pop("params", {}) or {}
+
+ cls: ClsType[JSON] = kwargs.pop("cls", None)
+
+ _request = build_apps_get_event_logs_request(
+ app_id=app_id,
+ event_id=event_id,
+ follow=follow,
+ type=type,
+ pod_connection_timeout=pod_connection_timeout,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = (
+ self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 404]:
+ if _stream:
+ response.read() # Load the body in memory and close the socket
+ map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
+ raise HttpResponseError(response=response)
+
+ response_headers = {}
+ if response.status_code == 200:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if response.status_code == 404:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if cls:
+ return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
+
+ return cast(JSON, deserialized) # type: ignore
+
+ @distributed_trace
+ def list_instance_sizes(self, **kwargs: Any) -> JSON:
+ # pylint: disable=line-too-long
+ """List Instance Sizes.
+
+ List all instance sizes for ``service``\\ , ``worker``\\ , and ``job`` components.
+
+ :return: JSON object
+ :rtype: JSON
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # response body for status code(s): 200
+ response == {
+ "discount_percent": 0.0, # Optional.
+ "instance_sizes": [
+ {
+ "bandwidth_allowance_gib": "str", # Optional. The bandwidth
+ allowance in GiB for the instance size.
+ "cpu_type": "UNSPECIFIED", # Optional. Default value is
+ "UNSPECIFIED". * SHARED: Shared vCPU cores * DEDICATED: Dedicated vCPU
+ cores. Known values are: "UNSPECIFIED", "SHARED", and "DEDICATED".
+ "cpus": "str", # Optional. The number of allotted vCPU
+ cores.
+ "deprecation_intent": bool, # Optional. Indicates if the
+ instance size is intended for deprecation.
+ "memory_bytes": "str", # Optional. The allotted memory in
+ bytes.
+ "name": "str", # Optional. A human-readable name of the
+ instance size.
+ "scalable": bool, # Optional. Indicates if the instance size
+ can enable autoscaling.
+ "single_instance_only": bool, # Optional. Indicates if the
+ instance size allows more than one instance.
+ "slug": "str", # Optional. The slug of the instance size.
+ "tier_downgrade_to": "str", # Optional. The slug of the
+ corresponding downgradable instance size on the lower tier.
+ "tier_slug": "str", # Optional. The slug of the tier to
+ which this instance size belongs.
+ "tier_upgrade_to": "str", # Optional. The slug of the
+ corresponding upgradable instance size on the higher tier.
+ "usd_per_month": "str", # Optional. The cost of this
+ instance size in USD per month.
+ "usd_per_second": "str" # Optional. The cost of this
+ instance size in USD per second.
+ }
+ ]
+ }
+ """
+ error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ 401: cast(
+ Type[HttpResponseError],
+ lambda response: ClientAuthenticationError(response=response),
+ ),
+ 429: HttpResponseError,
+ 500: HttpResponseError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = kwargs.pop("params", {}) or {}
+
+ cls: ClsType[JSON] = kwargs.pop("cls", None)
+
+ _request = build_apps_list_instance_sizes_request(
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = (
+ self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ if _stream:
+ response.read() # Load the body in memory and close the socket
+ map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
+ raise HttpResponseError(response=response)
+
+ response_headers = {}
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if cls:
+ return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
+
+ return cast(JSON, deserialized) # type: ignore
+
+ @distributed_trace
+ def get_instance_size(self, slug: str, **kwargs: Any) -> JSON:
+ # pylint: disable=line-too-long
+ """Retrieve an Instance Size.
+
+ Retrieve information about a specific instance size for ``service``\\ , ``worker``\\ , and
+ ``job`` components.
+
+ :param slug: The slug of the instance size. Required.
+ :type slug: str
+ :return: JSON object
+ :rtype: JSON
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # response body for status code(s): 200
+ response == {
+ "instance_size": {
+ "bandwidth_allowance_gib": "str", # Optional. The bandwidth
+ allowance in GiB for the instance size.
+ "cpu_type": "UNSPECIFIED", # Optional. Default value is
+ "UNSPECIFIED". * SHARED: Shared vCPU cores * DEDICATED: Dedicated vCPU
+ cores. Known values are: "UNSPECIFIED", "SHARED", and "DEDICATED".
+ "cpus": "str", # Optional. The number of allotted vCPU cores.
+ "deprecation_intent": bool, # Optional. Indicates if the instance
+ size is intended for deprecation.
+ "memory_bytes": "str", # Optional. The allotted memory in bytes.
+ "name": "str", # Optional. A human-readable name of the instance
+ size.
+ "scalable": bool, # Optional. Indicates if the instance size can
+ enable autoscaling.
+ "single_instance_only": bool, # Optional. Indicates if the instance
+ size allows more than one instance.
+ "slug": "str", # Optional. The slug of the instance size.
+ "tier_downgrade_to": "str", # Optional. The slug of the
+ corresponding downgradable instance size on the lower tier.
+ "tier_slug": "str", # Optional. The slug of the tier to which this
+ instance size belongs.
+ "tier_upgrade_to": "str", # Optional. The slug of the corresponding
+ upgradable instance size on the higher tier.
+ "usd_per_month": "str", # Optional. The cost of this instance size
+ in USD per month.
+ "usd_per_second": "str" # Optional. The cost of this instance size
+ in USD per second.
+ }
+ }
+ # response body for status code(s): 404
+ response == {
+ "id": "str", # A short identifier corresponding to the HTTP status code
+ returned. For example, the ID for a response returning a 404 status code would
+ be "not_found.". Required.
+ "message": "str", # A message providing additional information about the
+ error, including details to help resolve it when possible. Required.
+ "request_id": "str" # Optional. Optionally, some endpoints may include a
+ request ID that should be provided when reporting bugs or opening support
+ tickets to help identify the issue.
+ }
+ """
+ error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ 401: cast(
+ Type[HttpResponseError],
+ lambda response: ClientAuthenticationError(response=response),
+ ),
+ 429: HttpResponseError,
+ 500: HttpResponseError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = kwargs.pop("params", {}) or {}
+
+ cls: ClsType[JSON] = kwargs.pop("cls", None)
+
+ _request = build_apps_get_instance_size_request(
+ slug=slug,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = (
+ self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 404]:
+ if _stream:
+ response.read() # Load the body in memory and close the socket
+ map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
+ raise HttpResponseError(response=response)
+
+ response_headers = {}
+ if response.status_code == 200:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if response.status_code == 404:
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if cls:
+ return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
+
+ return cast(JSON, deserialized) # type: ignore
+
+ @distributed_trace
+ def list_regions(self, **kwargs: Any) -> JSON:
+ """List App Regions.
+
+ List all regions supported by App Platform.
+
+ :return: JSON object
+ :rtype: JSON
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # response body for status code(s): 200
+ response == {
+ "regions": [
+ {
+ "continent": "str", # Optional. The continent that this
+ region is in.
+ "data_centers": [
+ "str" # Optional. Data centers that are in this
+ region.
+ ],
+ "default": bool, # Optional. Whether or not the region is
+ presented as the default.
+ "disabled": bool, # Optional. Whether or not the region is
+ open for new apps.
+ "flag": "str", # Optional. The flag of this region.
+ "label": "str", # Optional. A human-readable name of the
+ region.
+ "reason": "str", # Optional. Reason that this region is not
+ available.
+ "slug": "str" # Optional. The slug form of the region name.
+ }
+ ]
+ }
+ """
+ error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ 401: cast(
+ Type[HttpResponseError],
+ lambda response: ClientAuthenticationError(response=response),
+ ),
+ 429: HttpResponseError,
+ 500: HttpResponseError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = kwargs.pop("params", {}) or {}
+
+ cls: ClsType[JSON] = kwargs.pop("cls", None)
+
+ _request = build_apps_list_regions_request(
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = (
+ self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ if _stream:
+ response.read() # Load the body in memory and close the socket
+ map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
+ raise HttpResponseError(response=response)
+
+ response_headers = {}
+ response_headers["ratelimit-limit"] = self._deserialize(
+ "int", response.headers.get("ratelimit-limit")
+ )
+ response_headers["ratelimit-remaining"] = self._deserialize(
+ "int", response.headers.get("ratelimit-remaining")
+ )
+ response_headers["ratelimit-reset"] = self._deserialize(
+ "int", response.headers.get("ratelimit-reset")
+ )
+
+ if response.content:
+ deserialized = response.json()
+ else:
+ deserialized = None
+
+ if cls:
+ return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
+
+ return cast(JSON, deserialized) # type: ignore
+
+ @overload
+ def validate_app_spec(
+ self, body: JSON, *, content_type: str = "application/json", **kwargs: Any
+ ) -> JSON:
+ # pylint: disable=line-too-long
+ """Propose an App Spec.
+
+ To propose and validate a spec for a new or existing app, send a POST request to the
+ ``/v2/apps/propose`` endpoint. The request returns some information about the proposed app,
+ including app cost and upgrade cost. If an existing app ID is specified, the app spec is
+ treated as a proposed update to the existing app.
+
+ :param body: Required.
+ :type body: JSON
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: JSON object
+ :rtype: JSON
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # JSON input template you can fill out and use as your body input.
+ body = {
+ "spec": {
+ "name": "str", # The name of the app. Must be unique across all apps
+ in the same account. Required.
+ "databases": [
+ {
+ "name": "str", # The database's name. The name must
+ be unique across all components within the same app and cannot use
+ capital letters. Required.
+ "cluster_name": "str", # Optional. The name of the
+ underlying DigitalOcean DBaaS cluster. This is required for
+ production databases. For dev databases, if cluster_name is not set,
+ a new cluster will be provisioned.
+ "db_name": "str", # Optional. The name of the MySQL
+ or PostgreSQL database to configure.
+ "db_user": "str", # Optional. The name of the MySQL
+ or PostgreSQL user to configure.
+ "engine": "UNSET", # Optional. Default value is
+ "UNSET". * MYSQL: MySQL * PG: PostgreSQL * REDIS: Caching * MONGODB:
+ MongoDB * KAFKA: Kafka * OPENSEARCH: OpenSearch * VALKEY: ValKey.
+ Known values are: "UNSET", "MYSQL", "PG", "REDIS", "MONGODB",
+ "KAFKA", "OPENSEARCH", and "VALKEY".
+ "production": bool, # Optional. Whether this is a
+ production or dev database.
+ "version": "str" # Optional. The version of the
+ database engine.
+ }
+ ],
+ "disable_edge_cache": False, # Optional. Default value is False. ..
+ role:: raw-html-m2r(raw) :format: html If set to ``true``"" , the app
+ will **not** be cached at the edge (CDN). Enable this option if you want to
+            manage CDN configuration yourself\u2014whether by using an external CDN
+ provider or by handling static content and caching within your app. This
+ setting is also recommended for apps that require real-time data or serve
+ dynamic content, such as those using Server-Sent Events (SSE) over GET, or
+ hosting an MCP (Model Context Protocol) Server that utilizes SSE.""
+            :raw-html-m2r:`<br>` **Note:** This feature is not available for static site
+            components.\\ :raw-html-m2r:`<br>` For more information, see `Disable CDN
+ Cache
+ `_.
+ "disable_email_obfuscation": False, # Optional. Default value is
+ False. If set to ``true``"" , email addresses in the app will not be
+ obfuscated. This is useful for apps that require email addresses to be
+ visible (in the HTML markup).
+ "domains": [
+ {
+ "domain": "str", # The hostname for the domain.
+ Required.
+ "minimum_tls_version": "str", # Optional. The
+ minimum version of TLS a client application can use to access
+ resources for the domain. Must be one of the following values
+ wrapped within quotations: ``"1.2"`` or ``"1.3"``. Known values are:
+ "1.2" and "1.3".
+ "type": "UNSPECIFIED", # Optional. Default value is
+ "UNSPECIFIED". * DEFAULT: The default ``.ondigitalocean.app`` domain
+ assigned to this app * PRIMARY: The primary domain for this app that
+ is displayed as the default in the control panel, used in bindable
+ environment variables, and any other places that reference an app's
+ live URL. Only one domain may be set as primary. * ALIAS: A
+ non-primary domain. Known values are: "UNSPECIFIED", "DEFAULT",
+ "PRIMARY", and "ALIAS".
+ "wildcard": bool, # Optional. Indicates whether the
+ domain includes all sub-domains, in addition to the given domain.
+ "zone": "str" # Optional. Optional. If the domain
+ uses DigitalOcean DNS and you would like App Platform to
+ automatically manage it for you, set this to the name of the domain
+ on your account. For example, If the domain you are adding is
+ ``app.domain.com``"" , the zone could be ``domain.com``.
+ }
+ ],
+ "egress": {
+ "type": "AUTOASSIGN" # Optional. Default value is
+ "AUTOASSIGN". The app egress type. Known values are: "AUTOASSIGN" and
+ "DEDICATED_IP".
+ },
+ "enhanced_threat_control_enabled": False, # Optional. Default value
+ is False. If set to ``true``"" , suspicious requests will go through
+ additional security checks to help mitigate layer 7 DDoS attacks.
+ "functions": [
+ {
+ "name": "str", # The name. Must be unique across all
+ components within the same app. Required.
+ "alerts": [
+ {
+ "disabled": bool, # Optional. Is the
+ alert disabled?.
+ "operator": "UNSPECIFIED_OPERATOR",
+ # Optional. Default value is "UNSPECIFIED_OPERATOR". Known
+ values are: "UNSPECIFIED_OPERATOR", "GREATER_THAN", and
+ "LESS_THAN".
+ "rule": "UNSPECIFIED_RULE", #
+ Optional. Default value is "UNSPECIFIED_RULE". Known values
+ are: "UNSPECIFIED_RULE", "CPU_UTILIZATION",
+ "MEM_UTILIZATION", "RESTART_COUNT", "DEPLOYMENT_FAILED",
+ "DEPLOYMENT_LIVE", "DOMAIN_FAILED", "DOMAIN_LIVE",
+ "AUTOSCALE_FAILED", "AUTOSCALE_SUCCEEDED",
+ "FUNCTIONS_ACTIVATION_COUNT",
+ "FUNCTIONS_AVERAGE_DURATION_MS",
+ "FUNCTIONS_ERROR_RATE_PER_MINUTE",
+ "FUNCTIONS_AVERAGE_WAIT_TIME_MS", "FUNCTIONS_ERROR_COUNT",
+ and "FUNCTIONS_GB_RATE_PER_SECOND".
+ "value": 0.0, # Optional. Threshold
+ value for alert.
+ "window": "UNSPECIFIED_WINDOW" #
+ Optional. Default value is "UNSPECIFIED_WINDOW". Known values
+ are: "UNSPECIFIED_WINDOW", "FIVE_MINUTES", "TEN_MINUTES",
+ "THIRTY_MINUTES", and "ONE_HOUR".
+ }
+ ],
+ "bitbucket": {
+ "branch": "str", # Optional. The name of the
+ branch to use.
+ "deploy_on_push": bool, # Optional. Whether
+ to automatically deploy new commits made to the repo.
+ "repo": "str" # Optional. The name of the
+ repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
+ },
+ "cors": {
+ "allow_credentials": bool, # Optional.
+ Whether browsers should expose the response to the client-side
+                JavaScript code when the request\u2019s credentials mode is
+ include. This configures the ``Access-Control-Allow-Credentials``
+ header.
+ "allow_headers": [
+ "str" # Optional. The set of allowed
+ HTTP request headers. This configures the
+ ``Access-Control-Allow-Headers`` header.
+ ],
+ "allow_methods": [
+ "str" # Optional. The set of allowed
+ HTTP methods. This configures the
+ ``Access-Control-Allow-Methods`` header.
+ ],
+ "allow_origins": [
+ {
+ "exact": "str", # Optional.
+ Exact string match. Only 1 of ``exact``"" , ``prefix``""
+ , or ``regex`` must be set.
+ "prefix": "str", # Optional.
+ Prefix-based match. Only 1 of ``exact``"" , ``prefix``""
+ , or ``regex`` must be set.
+ "regex": "str" # Optional.
+ RE2 style regex-based match. Only 1 of ``exact``"" ,
+ ``prefix``"" , or ``regex`` must be set. For more
+ information about RE2 syntax, see:
+ https://github.com/google/re2/wiki/Syntax.
+ }
+ ],
+ "expose_headers": [
+ "str" # Optional. The set of HTTP
+ response headers that browsers are allowed to access. This
+ configures the ``Access-Control-Expose-Headers`` header.
+ ],
+ "max_age": "str" # Optional. An optional
+ duration specifying how long browsers can cache the results of a
+ preflight request. This configures the ``Access-Control-Max-Age``
+ header.
+ },
+ "envs": [
+ {
+ "key": "str", # The variable name.
+ Required.
+ "scope": "RUN_AND_BUILD_TIME", #
+ Optional. Default value is "RUN_AND_BUILD_TIME". * RUN_TIME:
+ Made available only at run-time * BUILD_TIME: Made available
+ only at build-time * RUN_AND_BUILD_TIME: Made available at
+ both build and run-time. Known values are: "UNSET",
+ "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME".
+ "type": "GENERAL", # Optional.
+ Default value is "GENERAL". * GENERAL: A plain-text
+ environment variable * SECRET: A secret encrypted environment
+ variable. Known values are: "GENERAL" and "SECRET".
+ "value": "str" # Optional. The
+ value. If the type is ``SECRET``"" , the value will be
+ encrypted on first submission. On following submissions, the
+ encrypted value should be used.
+ }
+ ],
+ "git": {
+ "branch": "str", # Optional. The name of the
+ branch to use.
+ "repo_clone_url": "str" # Optional. The
+ clone URL of the repo. Example:
+ ``https://github.com/digitalocean/sample-golang.git``.
+ },
+ "github": {
+ "branch": "str", # Optional. The name of the
+ branch to use.
+ "deploy_on_push": bool, # Optional. Whether
+ to automatically deploy new commits made to the repo.
+ "repo": "str" # Optional. The name of the
+ repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
+ },
+ "gitlab": {
+ "branch": "str", # Optional. The name of the
+ branch to use.
+ "deploy_on_push": bool, # Optional. Whether
+ to automatically deploy new commits made to the repo.
+ "repo": "str" # Optional. The name of the
+ repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
+ },
+ "log_destinations": [
+ {
+ "name": "str", # Required.
+ "datadog": {
+ "api_key": "str", # Datadog
+ API key. Required.
+ "endpoint": "str" #
+ Optional. Datadog HTTP log intake endpoint.
+ },
+ "logtail": {
+ "token": "str" # Optional.
+ Logtail token.
+ },
+ "open_search": {
+ "basic_auth": {
+ "password": "str", #
+ Optional. Password for user defined in User. Is
+ required when ``endpoint`` is set. Cannot be set if
+ using a DigitalOcean DBaaS OpenSearch cluster.
+ "user": "str" #
+ Optional. Username to authenticate with. Only
+ required when ``endpoint`` is set. Defaults to
+ ``doadmin`` when ``cluster_name`` is set.
+ },
+ "cluster_name": "str", #
+ Optional. The name of a DigitalOcean DBaaS OpenSearch
+ cluster to use as a log forwarding destination. Cannot be
+ specified if ``endpoint`` is also specified.
+ "endpoint": "str", #
+ Optional. OpenSearch API Endpoint. Only HTTPS is
+ supported. Format: https://:code:``::code:``.
+ Cannot be specified if ``cluster_name`` is also
+ specified.
+ "index_name": "logs" #
+ Optional. Default value is "logs". The index name to use
+ for the logs. If not set, the default index name is
"logs".
},
"papertrail": {
@@ -106782,276 +109009,22 @@ def validate_app_spec(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON
}
}
"""
- error_map: MutableMapping[int, Type[HttpResponseError]] = {
- 404: ResourceNotFoundError,
- 409: ResourceExistsError,
- 304: ResourceNotModifiedError,
- 401: cast(
- Type[HttpResponseError],
- lambda response: ClientAuthenticationError(response=response),
- ),
- 429: HttpResponseError,
- 500: HttpResponseError,
- }
- error_map.update(kwargs.pop("error_map", {}) or {})
-
- _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
- _params = kwargs.pop("params", {}) or {}
-
- content_type: Optional[str] = kwargs.pop(
- "content_type", _headers.pop("Content-Type", None)
- )
- cls: ClsType[JSON] = kwargs.pop("cls", None)
-
- content_type = content_type or "application/json"
- _json = None
- _content = None
- if isinstance(body, (IOBase, bytes)):
- _content = body
- else:
- _json = body
-
- _request = build_apps_validate_app_spec_request(
- content_type=content_type,
- json=_json,
- content=_content,
- headers=_headers,
- params=_params,
- )
- _request.url = self._client.format_url(_request.url)
-
- _stream = False
- pipeline_response: PipelineResponse = (
- self._client._pipeline.run( # pylint: disable=protected-access
- _request, stream=_stream, **kwargs
- )
- )
-
- response = pipeline_response.http_response
-
- if response.status_code not in [200]:
- if _stream:
- response.read() # Load the body in memory and close the socket
- map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
- raise HttpResponseError(response=response)
-
- response_headers = {}
- response_headers["ratelimit-limit"] = self._deserialize(
- "int", response.headers.get("ratelimit-limit")
- )
- response_headers["ratelimit-remaining"] = self._deserialize(
- "int", response.headers.get("ratelimit-remaining")
- )
- response_headers["ratelimit-reset"] = self._deserialize(
- "int", response.headers.get("ratelimit-reset")
- )
-
- if response.content:
- deserialized = response.json()
- else:
- deserialized = None
-
- if cls:
- return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
-
- return cast(JSON, deserialized) # type: ignore
-
- @distributed_trace
- def list_alerts(self, app_id: str, **kwargs: Any) -> JSON:
- # pylint: disable=line-too-long
- """List all app alerts.
-
- List alerts associated to the app and any components. This includes configuration information
- about the alerts including emails, slack webhooks, and triggering events or conditions.
-
- :param app_id: The app ID. Required.
- :type app_id: str
- :return: JSON object
- :rtype: JSON
- :raises ~azure.core.exceptions.HttpResponseError:
-
- Example:
- .. code-block:: python
-
- # response body for status code(s): 200
- response == {
- "alerts": [
- {
- "component_name": "str", # Optional. Name of component the
- alert belongs to.
- "emails": [
- "" # Optional. Default value is "". Emails for
- alerts to go to.
- ],
- "id": "str", # Optional. The ID of the alert.
- "phase": "UNKNOWN", # Optional. Default value is "UNKNOWN".
- Known values are: "UNKNOWN", "PENDING", "CONFIGURING", "ACTIVE", and
- "ERROR".
- "progress": {
- "steps": [
- {
- "ended_at": "2020-02-20 00:00:00", #
- Optional. The start time of this step.
- "name": "str", # Optional. The name
- of this step.
- "reason": {
- "code": "str", # Optional.
- The error code.
- "message": "str" # Optional.
- The error message.
- },
- "started_at": "2020-02-20 00:00:00",
- # Optional. The start time of this step.
- "status": "UNKNOWN" # Optional.
- Default value is "UNKNOWN". Known values are: "UNKNOWN",
- "PENDING", "RUNNING", "ERROR", and "SUCCESS".
- }
- ]
- },
- "slack_webhooks": [
- {
- "channel": "str", # Optional. Name of the
- Slack Webhook Channel.
- "url": "str" # Optional. URL of the Slack
- webhook.
- }
- ],
- "spec": {
- "disabled": bool, # Optional. Is the alert
- disabled?.
- "operator": "UNSPECIFIED_OPERATOR", # Optional.
- Default value is "UNSPECIFIED_OPERATOR". Known values are:
- "UNSPECIFIED_OPERATOR", "GREATER_THAN", and "LESS_THAN".
- "rule": "UNSPECIFIED_RULE", # Optional. Default
- value is "UNSPECIFIED_RULE". Known values are: "UNSPECIFIED_RULE",
- "CPU_UTILIZATION", "MEM_UTILIZATION", "RESTART_COUNT",
- "DEPLOYMENT_FAILED", "DEPLOYMENT_LIVE", "DOMAIN_FAILED",
- "DOMAIN_LIVE", "AUTOSCALE_FAILED", "AUTOSCALE_SUCCEEDED",
- "FUNCTIONS_ACTIVATION_COUNT", "FUNCTIONS_AVERAGE_DURATION_MS",
- "FUNCTIONS_ERROR_RATE_PER_MINUTE", "FUNCTIONS_AVERAGE_WAIT_TIME_MS",
- "FUNCTIONS_ERROR_COUNT", and "FUNCTIONS_GB_RATE_PER_SECOND".
- "value": 0.0, # Optional. Threshold value for alert.
- "window": "UNSPECIFIED_WINDOW" # Optional. Default
- value is "UNSPECIFIED_WINDOW". Known values are:
- "UNSPECIFIED_WINDOW", "FIVE_MINUTES", "TEN_MINUTES",
- "THIRTY_MINUTES", and "ONE_HOUR".
- }
- }
- ]
- }
- # response body for status code(s): 404
- response == {
- "id": "str", # A short identifier corresponding to the HTTP status code
- returned. For example, the ID for a response returning a 404 status code would
- be "not_found.". Required.
- "message": "str", # A message providing additional information about the
- error, including details to help resolve it when possible. Required.
- "request_id": "str" # Optional. Optionally, some endpoints may include a
- request ID that should be provided when reporting bugs or opening support
- tickets to help identify the issue.
- }
- """
- error_map: MutableMapping[int, Type[HttpResponseError]] = {
- 404: ResourceNotFoundError,
- 409: ResourceExistsError,
- 304: ResourceNotModifiedError,
- 401: cast(
- Type[HttpResponseError],
- lambda response: ClientAuthenticationError(response=response),
- ),
- 429: HttpResponseError,
- 500: HttpResponseError,
- }
- error_map.update(kwargs.pop("error_map", {}) or {})
-
- _headers = kwargs.pop("headers", {}) or {}
- _params = kwargs.pop("params", {}) or {}
-
- cls: ClsType[JSON] = kwargs.pop("cls", None)
-
- _request = build_apps_list_alerts_request(
- app_id=app_id,
- headers=_headers,
- params=_params,
- )
- _request.url = self._client.format_url(_request.url)
-
- _stream = False
- pipeline_response: PipelineResponse = (
- self._client._pipeline.run( # pylint: disable=protected-access
- _request, stream=_stream, **kwargs
- )
- )
-
- response = pipeline_response.http_response
-
- if response.status_code not in [200, 404]:
- if _stream:
- response.read() # Load the body in memory and close the socket
- map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
- raise HttpResponseError(response=response)
-
- response_headers = {}
- if response.status_code == 200:
- response_headers["ratelimit-limit"] = self._deserialize(
- "int", response.headers.get("ratelimit-limit")
- )
- response_headers["ratelimit-remaining"] = self._deserialize(
- "int", response.headers.get("ratelimit-remaining")
- )
- response_headers["ratelimit-reset"] = self._deserialize(
- "int", response.headers.get("ratelimit-reset")
- )
-
- if response.content:
- deserialized = response.json()
- else:
- deserialized = None
-
- if response.status_code == 404:
- response_headers["ratelimit-limit"] = self._deserialize(
- "int", response.headers.get("ratelimit-limit")
- )
- response_headers["ratelimit-remaining"] = self._deserialize(
- "int", response.headers.get("ratelimit-remaining")
- )
- response_headers["ratelimit-reset"] = self._deserialize(
- "int", response.headers.get("ratelimit-reset")
- )
-
- if response.content:
- deserialized = response.json()
- else:
- deserialized = None
-
- if cls:
- return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
-
- return cast(JSON, deserialized) # type: ignore
@overload
- def assign_alert_destinations(
- self,
- app_id: str,
- alert_id: str,
- body: JSON,
- *,
- content_type: str = "application/json",
- **kwargs: Any,
+ def validate_app_spec(
+ self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
) -> JSON:
# pylint: disable=line-too-long
- """Update destinations for alerts.
+ """Propose an App Spec.
- Updates the emails and slack webhook destinations for app alerts. Emails must be associated to
- a user with access to the app.
+ To propose and validate a spec for a new or existing app, send a POST request to the
+ ``/v2/apps/propose`` endpoint. The request returns some information about the proposed app,
+ including app cost and upgrade cost. If an existing app ID is specified, the app spec is
+ treated as a proposed update to the existing app.
- :param app_id: The app ID. Required.
- :type app_id: str
- :param alert_id: The alert ID. Required.
- :type alert_id: str
:param body: Required.
- :type body: JSON
- :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:return: JSON object
@@ -107061,1173 +109034,1612 @@ def assign_alert_destinations(
Example:
.. code-block:: python
- # JSON input template you can fill out and use as your body input.
- body = {
- "emails": [
- "" # Optional. Default value is "".
- ],
- "slack_webhooks": [
- {
- "channel": "str", # Optional. Name of the Slack Webhook
- Channel.
- "url": "str" # Optional. URL of the Slack webhook.
- }
- ]
- }
-
# response body for status code(s): 200
response == {
- "alert": {
- "component_name": "str", # Optional. Name of component the alert
- belongs to.
- "emails": [
- "" # Optional. Default value is "". Emails for alerts to go
- to.
- ],
- "id": "str", # Optional. The ID of the alert.
- "phase": "UNKNOWN", # Optional. Default value is "UNKNOWN". Known
- values are: "UNKNOWN", "PENDING", "CONFIGURING", "ACTIVE", and "ERROR".
- "progress": {
- "steps": [
- {
- "ended_at": "2020-02-20 00:00:00", #
- Optional. The start time of this step.
- "name": "str", # Optional. The name of this
- step.
- "reason": {
- "code": "str", # Optional. The error
- code.
- "message": "str" # Optional. The
- error message.
- },
- "started_at": "2020-02-20 00:00:00", #
- Optional. The start time of this step.
- "status": "UNKNOWN" # Optional. Default
- value is "UNKNOWN". Known values are: "UNKNOWN", "PENDING",
- "RUNNING", "ERROR", and "SUCCESS".
- }
- ]
- },
- "slack_webhooks": [
+ "app_cost": 0, # Optional. The monthly cost of the proposed app in USD.
+ "app_is_static": bool, # Optional. Indicates whether the app is a static
+ app.
+ "app_name_available": bool, # Optional. Indicates whether the app name is
+ available.
+ "app_name_suggestion": "str", # Optional. The suggested name if the proposed
+ app name is unavailable.
+ "app_tier_downgrade_cost": 0, # Optional. The monthly cost of the proposed
+ app in USD using the previous pricing plan tier. For example, if you propose an
+ app that uses the Professional tier, the ``app_tier_downgrade_cost`` field
+ displays the monthly cost of the app if it were to use the Basic tier. If the
+ proposed app already uses the lest expensive tier, the field is empty.
+ "existing_static_apps": "str", # Optional. The maximum number of free static
+ apps the account can have. We will charge you for any additional static apps.
+ "spec": {
+ "name": "str", # The name of the app. Must be unique across all apps
+ in the same account. Required.
+ "databases": [
{
- "channel": "str", # Optional. Name of the Slack
- Webhook Channel.
- "url": "str" # Optional. URL of the Slack webhook.
+ "name": "str", # The database's name. The name must
+ be unique across all components within the same app and cannot use
+ capital letters. Required.
+ "cluster_name": "str", # Optional. The name of the
+ underlying DigitalOcean DBaaS cluster. This is required for
+ production databases. For dev databases, if cluster_name is not set,
+ a new cluster will be provisioned.
+ "db_name": "str", # Optional. The name of the MySQL
+ or PostgreSQL database to configure.
+ "db_user": "str", # Optional. The name of the MySQL
+ or PostgreSQL user to configure.
+ "engine": "UNSET", # Optional. Default value is
+ "UNSET". * MYSQL: MySQL * PG: PostgreSQL * REDIS: Caching * MONGODB:
+ MongoDB * KAFKA: Kafka * OPENSEARCH: OpenSearch * VALKEY: ValKey.
+ Known values are: "UNSET", "MYSQL", "PG", "REDIS", "MONGODB",
+ "KAFKA", "OPENSEARCH", and "VALKEY".
+ "production": bool, # Optional. Whether this is a
+ production or dev database.
+ "version": "str" # Optional. The version of the
+ database engine.
}
],
- "spec": {
- "disabled": bool, # Optional. Is the alert disabled?.
- "operator": "UNSPECIFIED_OPERATOR", # Optional. Default
- value is "UNSPECIFIED_OPERATOR". Known values are:
- "UNSPECIFIED_OPERATOR", "GREATER_THAN", and "LESS_THAN".
- "rule": "UNSPECIFIED_RULE", # Optional. Default value is
- "UNSPECIFIED_RULE". Known values are: "UNSPECIFIED_RULE",
- "CPU_UTILIZATION", "MEM_UTILIZATION", "RESTART_COUNT",
- "DEPLOYMENT_FAILED", "DEPLOYMENT_LIVE", "DOMAIN_FAILED", "DOMAIN_LIVE",
- "AUTOSCALE_FAILED", "AUTOSCALE_SUCCEEDED", "FUNCTIONS_ACTIVATION_COUNT",
- "FUNCTIONS_AVERAGE_DURATION_MS", "FUNCTIONS_ERROR_RATE_PER_MINUTE",
- "FUNCTIONS_AVERAGE_WAIT_TIME_MS", "FUNCTIONS_ERROR_COUNT", and
- "FUNCTIONS_GB_RATE_PER_SECOND".
- "value": 0.0, # Optional. Threshold value for alert.
- "window": "UNSPECIFIED_WINDOW" # Optional. Default value is
- "UNSPECIFIED_WINDOW". Known values are: "UNSPECIFIED_WINDOW",
- "FIVE_MINUTES", "TEN_MINUTES", "THIRTY_MINUTES", and "ONE_HOUR".
- }
- }
- }
- # response body for status code(s): 404
- response == {
- "id": "str", # A short identifier corresponding to the HTTP status code
- returned. For example, the ID for a response returning a 404 status code would
- be "not_found.". Required.
- "message": "str", # A message providing additional information about the
- error, including details to help resolve it when possible. Required.
- "request_id": "str" # Optional. Optionally, some endpoints may include a
- request ID that should be provided when reporting bugs or opening support
- tickets to help identify the issue.
- }
- """
-
- @overload
- def assign_alert_destinations(
- self,
- app_id: str,
- alert_id: str,
- body: IO[bytes],
- *,
- content_type: str = "application/json",
- **kwargs: Any,
- ) -> JSON:
- # pylint: disable=line-too-long
- """Update destinations for alerts.
-
- Updates the emails and slack webhook destinations for app alerts. Emails must be associated to
- a user with access to the app.
-
- :param app_id: The app ID. Required.
- :type app_id: str
- :param alert_id: The alert ID. Required.
- :type alert_id: str
- :param body: Required.
- :type body: IO[bytes]
- :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
- Default value is "application/json".
- :paramtype content_type: str
- :return: JSON object
- :rtype: JSON
- :raises ~azure.core.exceptions.HttpResponseError:
-
- Example:
- .. code-block:: python
-
- # response body for status code(s): 200
- response == {
- "alert": {
- "component_name": "str", # Optional. Name of component the alert
- belongs to.
- "emails": [
- "" # Optional. Default value is "". Emails for alerts to go
- to.
+ "disable_edge_cache": False, # Optional. Default value is False. ..
+ role:: raw-html-m2r(raw) :format: html If set to ``true``"" , the app
+ will **not** be cached at the edge (CDN). Enable this option if you want to
+ manage CDN configuration yourself"u2014whether by using an external CDN
+ provider or by handling static content and caching within your app. This
+ setting is also recommended for apps that require real-time data or serve
+ dynamic content, such as those using Server-Sent Events (SSE) over GET, or
+ hosting an MCP (Model Context Protocol) Server that utilizes SSE.""
+ :raw-html-m2r:`
` **Note:** This feature is not available for static site
+ components."" :raw-html-m2r:`
` For more information, see `Disable CDN
+ Cache
+ `_.
+ "disable_email_obfuscation": False, # Optional. Default value is
+ False. If set to ``true``"" , email addresses in the app will not be
+ obfuscated. This is useful for apps that require email addresses to be
+ visible (in the HTML markup).
+ "domains": [
+ {
+ "domain": "str", # The hostname for the domain.
+ Required.
+ "minimum_tls_version": "str", # Optional. The
+ minimum version of TLS a client application can use to access
+ resources for the domain. Must be one of the following values
+ wrapped within quotations: ``"1.2"`` or ``"1.3"``. Known values are:
+ "1.2" and "1.3".
+ "type": "UNSPECIFIED", # Optional. Default value is
+ "UNSPECIFIED". * DEFAULT: The default ``.ondigitalocean.app`` domain
+ assigned to this app * PRIMARY: The primary domain for this app that
+ is displayed as the default in the control panel, used in bindable
+ environment variables, and any other places that reference an app's
+ live URL. Only one domain may be set as primary. * ALIAS: A
+ non-primary domain. Known values are: "UNSPECIFIED", "DEFAULT",
+ "PRIMARY", and "ALIAS".
+ "wildcard": bool, # Optional. Indicates whether the
+ domain includes all sub-domains, in addition to the given domain.
+ "zone": "str" # Optional. Optional. If the domain
+ uses DigitalOcean DNS and you would like App Platform to
+ automatically manage it for you, set this to the name of the domain
+ on your account. For example, If the domain you are adding is
+ ``app.domain.com``"" , the zone could be ``domain.com``.
+ }
],
- "id": "str", # Optional. The ID of the alert.
- "phase": "UNKNOWN", # Optional. Default value is "UNKNOWN". Known
- values are: "UNKNOWN", "PENDING", "CONFIGURING", "ACTIVE", and "ERROR".
- "progress": {
- "steps": [
- {
- "ended_at": "2020-02-20 00:00:00", #
- Optional. The start time of this step.
- "name": "str", # Optional. The name of this
- step.
- "reason": {
- "code": "str", # Optional. The error
- code.
- "message": "str" # Optional. The
- error message.
- },
- "started_at": "2020-02-20 00:00:00", #
- Optional. The start time of this step.
- "status": "UNKNOWN" # Optional. Default
- value is "UNKNOWN". Known values are: "UNKNOWN", "PENDING",
- "RUNNING", "ERROR", and "SUCCESS".
- }
- ]
+ "egress": {
+ "type": "AUTOASSIGN" # Optional. Default value is
+ "AUTOASSIGN". The app egress type. Known values are: "AUTOASSIGN" and
+ "DEDICATED_IP".
},
- "slack_webhooks": [
+ "enhanced_threat_control_enabled": False, # Optional. Default value
+ is False. If set to ``true``"" , suspicious requests will go through
+ additional security checks to help mitigate layer 7 DDoS attacks.
+ "functions": [
{
- "channel": "str", # Optional. Name of the Slack
- Webhook Channel.
- "url": "str" # Optional. URL of the Slack webhook.
+ "name": "str", # The name. Must be unique across all
+ components within the same app. Required.
+ "alerts": [
+ {
+ "disabled": bool, # Optional. Is the
+ alert disabled?.
+ "operator": "UNSPECIFIED_OPERATOR",
+ # Optional. Default value is "UNSPECIFIED_OPERATOR". Known
+ values are: "UNSPECIFIED_OPERATOR", "GREATER_THAN", and
+ "LESS_THAN".
+ "rule": "UNSPECIFIED_RULE", #
+ Optional. Default value is "UNSPECIFIED_RULE". Known values
+ are: "UNSPECIFIED_RULE", "CPU_UTILIZATION",
+ "MEM_UTILIZATION", "RESTART_COUNT", "DEPLOYMENT_FAILED",
+ "DEPLOYMENT_LIVE", "DOMAIN_FAILED", "DOMAIN_LIVE",
+ "AUTOSCALE_FAILED", "AUTOSCALE_SUCCEEDED",
+ "FUNCTIONS_ACTIVATION_COUNT",
+ "FUNCTIONS_AVERAGE_DURATION_MS",
+ "FUNCTIONS_ERROR_RATE_PER_MINUTE",
+ "FUNCTIONS_AVERAGE_WAIT_TIME_MS", "FUNCTIONS_ERROR_COUNT",
+ and "FUNCTIONS_GB_RATE_PER_SECOND".
+ "value": 0.0, # Optional. Threshold
+ value for alert.
+ "window": "UNSPECIFIED_WINDOW" #
+ Optional. Default value is "UNSPECIFIED_WINDOW". Known values
+ are: "UNSPECIFIED_WINDOW", "FIVE_MINUTES", "TEN_MINUTES",
+ "THIRTY_MINUTES", and "ONE_HOUR".
+ }
+ ],
+ "bitbucket": {
+ "branch": "str", # Optional. The name of the
+ branch to use.
+ "deploy_on_push": bool, # Optional. Whether
+ to automatically deploy new commits made to the repo.
+ "repo": "str" # Optional. The name of the
+ repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
+ },
+ "cors": {
+ "allow_credentials": bool, # Optional.
+ Whether browsers should expose the response to the client-side
+ JavaScript code when the request"u2019s credentials mode is
+ include. This configures the ``Access-Control-Allow-Credentials``
+ header.
+ "allow_headers": [
+ "str" # Optional. The set of allowed
+ HTTP request headers. This configures the
+ ``Access-Control-Allow-Headers`` header.
+ ],
+ "allow_methods": [
+ "str" # Optional. The set of allowed
+ HTTP methods. This configures the
+ ``Access-Control-Allow-Methods`` header.
+ ],
+ "allow_origins": [
+ {
+ "exact": "str", # Optional.
+ Exact string match. Only 1 of ``exact``"" , ``prefix``""
+ , or ``regex`` must be set.
+ "prefix": "str", # Optional.
+ Prefix-based match. Only 1 of ``exact``"" , ``prefix``""
+ , or ``regex`` must be set.
+ "regex": "str" # Optional.
+ RE2 style regex-based match. Only 1 of ``exact``"" ,
+ ``prefix``"" , or ``regex`` must be set. For more
+ information about RE2 syntax, see:
+ https://github.com/google/re2/wiki/Syntax.
+ }
+ ],
+ "expose_headers": [
+ "str" # Optional. The set of HTTP
+ response headers that browsers are allowed to access. This
+ configures the ``Access-Control-Expose-Headers`` header.
+ ],
+ "max_age": "str" # Optional. An optional
+ duration specifying how long browsers can cache the results of a
+ preflight request. This configures the ``Access-Control-Max-Age``
+ header.
+ },
+ "envs": [
+ {
+ "key": "str", # The variable name.
+ Required.
+ "scope": "RUN_AND_BUILD_TIME", #
+ Optional. Default value is "RUN_AND_BUILD_TIME". * RUN_TIME:
+ Made available only at run-time * BUILD_TIME: Made available
+ only at build-time * RUN_AND_BUILD_TIME: Made available at
+ both build and run-time. Known values are: "UNSET",
+ "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME".
+ "type": "GENERAL", # Optional.
+ Default value is "GENERAL". * GENERAL: A plain-text
+ environment variable * SECRET: A secret encrypted environment
+ variable. Known values are: "GENERAL" and "SECRET".
+ "value": "str" # Optional. The
+ value. If the type is ``SECRET``"" , the value will be
+ encrypted on first submission. On following submissions, the
+ encrypted value should be used.
+ }
+ ],
+ "git": {
+ "branch": "str", # Optional. The name of the
+ branch to use.
+ "repo_clone_url": "str" # Optional. The
+ clone URL of the repo. Example:
+ ``https://github.com/digitalocean/sample-golang.git``.
+ },
+ "github": {
+ "branch": "str", # Optional. The name of the
+ branch to use.
+ "deploy_on_push": bool, # Optional. Whether
+ to automatically deploy new commits made to the repo.
+ "repo": "str" # Optional. The name of the
+ repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
+ },
+ "gitlab": {
+ "branch": "str", # Optional. The name of the
+ branch to use.
+ "deploy_on_push": bool, # Optional. Whether
+ to automatically deploy new commits made to the repo.
+ "repo": "str" # Optional. The name of the
+ repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
+ },
+ "log_destinations": [
+ {
+ "name": "str", # Required.
+ "datadog": {
+ "api_key": "str", # Datadog
+ API key. Required.
+ "endpoint": "str" #
+ Optional. Datadog HTTP log intake endpoint.
+ },
+ "logtail": {
+ "token": "str" # Optional.
+ Logtail token.
+ },
+ "open_search": {
+ "basic_auth": {
+ "password": "str", #
+ Optional. Password for user defined in User. Is
+ required when ``endpoint`` is set. Cannot be set if
+ using a DigitalOcean DBaaS OpenSearch cluster.
+ "user": "str" #
+ Optional. Username to authenticate with. Only
+ required when ``endpoint`` is set. Defaults to
+ ``doadmin`` when ``cluster_name`` is set.
+ },
+ "cluster_name": "str", #
+ Optional. The name of a DigitalOcean DBaaS OpenSearch
+ cluster to use as a log forwarding destination. Cannot be
+ specified if ``endpoint`` is also specified.
+ "endpoint": "str", #
+ Optional. OpenSearch API Endpoint. Only HTTPS is
+ supported. Format: https://:code:``::code:``.
+ Cannot be specified if ``cluster_name`` is also
+ specified.
+ "index_name": "logs" #
+ Optional. Default value is "logs". The index name to use
+ for the logs. If not set, the default index name is
+ "logs".
+ },
+ "papertrail": {
+ "endpoint": "str" #
+ Papertrail syslog endpoint. Required.
+ }
+ }
+ ],
+ "routes": [
+ {
+ "path": "str", # Optional.
+ (Deprecated - Use Ingress Rules instead). An HTTP path
+ prefix. Paths must start with / and must be unique across all
+ components within an app.
+ "preserve_path_prefix": bool #
+ Optional. An optional flag to preserve the path that is
+ forwarded to the backend service. By default, the HTTP
+ request path will be trimmed from the left when forwarded to
+ the component. For example, a component with ``path=/api``
+ will have requests to ``/api/list`` trimmed to ``/list``. If
+ this value is ``true``"" , the path will remain
+ ``/api/list``.
+ }
+ ],
+ "source_dir": "str" # Optional. An optional path to
+ the working directory to use for the build. For Dockerfile builds,
+ this will be used as the build context. Must be relative to the root
+ of the repo.
}
],
- "spec": {
- "disabled": bool, # Optional. Is the alert disabled?.
- "operator": "UNSPECIFIED_OPERATOR", # Optional. Default
- value is "UNSPECIFIED_OPERATOR". Known values are:
- "UNSPECIFIED_OPERATOR", "GREATER_THAN", and "LESS_THAN".
- "rule": "UNSPECIFIED_RULE", # Optional. Default value is
- "UNSPECIFIED_RULE". Known values are: "UNSPECIFIED_RULE",
- "CPU_UTILIZATION", "MEM_UTILIZATION", "RESTART_COUNT",
- "DEPLOYMENT_FAILED", "DEPLOYMENT_LIVE", "DOMAIN_FAILED", "DOMAIN_LIVE",
- "AUTOSCALE_FAILED", "AUTOSCALE_SUCCEEDED", "FUNCTIONS_ACTIVATION_COUNT",
- "FUNCTIONS_AVERAGE_DURATION_MS", "FUNCTIONS_ERROR_RATE_PER_MINUTE",
- "FUNCTIONS_AVERAGE_WAIT_TIME_MS", "FUNCTIONS_ERROR_COUNT", and
- "FUNCTIONS_GB_RATE_PER_SECOND".
- "value": 0.0, # Optional. Threshold value for alert.
- "window": "UNSPECIFIED_WINDOW" # Optional. Default value is
- "UNSPECIFIED_WINDOW". Known values are: "UNSPECIFIED_WINDOW",
- "FIVE_MINUTES", "TEN_MINUTES", "THIRTY_MINUTES", and "ONE_HOUR".
- }
- }
- }
- # response body for status code(s): 404
- response == {
- "id": "str", # A short identifier corresponding to the HTTP status code
- returned. For example, the ID for a response returning a 404 status code would
- be "not_found.". Required.
- "message": "str", # A message providing additional information about the
- error, including details to help resolve it when possible. Required.
- "request_id": "str" # Optional. Optionally, some endpoints may include a
- request ID that should be provided when reporting bugs or opening support
- tickets to help identify the issue.
- }
- """
-
- @distributed_trace
- def assign_alert_destinations(
- self, app_id: str, alert_id: str, body: Union[JSON, IO[bytes]], **kwargs: Any
- ) -> JSON:
- # pylint: disable=line-too-long
- """Update destinations for alerts.
-
- Updates the emails and slack webhook destinations for app alerts. Emails must be associated to
- a user with access to the app.
-
- :param app_id: The app ID. Required.
- :type app_id: str
- :param alert_id: The alert ID. Required.
- :type alert_id: str
- :param body: Is either a JSON type or a IO[bytes] type. Required.
- :type body: JSON or IO[bytes]
- :return: JSON object
- :rtype: JSON
- :raises ~azure.core.exceptions.HttpResponseError:
-
- Example:
- .. code-block:: python
-
- # JSON input template you can fill out and use as your body input.
- body = {
- "emails": [
- "" # Optional. Default value is "".
- ],
- "slack_webhooks": [
- {
- "channel": "str", # Optional. Name of the Slack Webhook
- Channel.
- "url": "str" # Optional. URL of the Slack webhook.
- }
- ]
- }
-
- # response body for status code(s): 200
- response == {
- "alert": {
- "component_name": "str", # Optional. Name of component the alert
- belongs to.
- "emails": [
- "" # Optional. Default value is "". Emails for alerts to go
- to.
- ],
- "id": "str", # Optional. The ID of the alert.
- "phase": "UNKNOWN", # Optional. Default value is "UNKNOWN". Known
- values are: "UNKNOWN", "PENDING", "CONFIGURING", "ACTIVE", and "ERROR".
- "progress": {
- "steps": [
+ "ingress": {
+ "rules": [
{
- "ended_at": "2020-02-20 00:00:00", #
- Optional. The start time of this step.
- "name": "str", # Optional. The name of this
- step.
- "reason": {
- "code": "str", # Optional. The error
- code.
- "message": "str" # Optional. The
- error message.
+ "component": {
+ "name": "str", # The name of the
+ component to route to. Required.
+ "preserve_path_prefix": "str", #
+ Optional. An optional flag to preserve the path that is
+ forwarded to the backend service. By default, the HTTP
+ request path will be trimmed from the left when forwarded to
+ the component. For example, a component with ``path=/api``
+ will have requests to ``/api/list`` trimmed to ``/list``. If
+ this value is ``true``"" , the path will remain
+ ``/api/list``. Note: this is not applicable for Functions
+ Components and is mutually exclusive with ``rewrite``.
+ "rewrite": "str" # Optional. An
+ optional field that will rewrite the path of the component to
+ be what is specified here. By default, the HTTP request path
+ will be trimmed from the left when forwarded to the
+ component. For example, a component with ``path=/api`` will
+ have requests to ``/api/list`` trimmed to ``/list``. If you
+ specified the rewrite to be ``/v1/``"" , requests to
+ ``/api/list`` would be rewritten to ``/v1/list``. Note: this
+ is mutually exclusive with ``preserve_path_prefix``.
},
- "started_at": "2020-02-20 00:00:00", #
- Optional. The start time of this step.
- "status": "UNKNOWN" # Optional. Default
- value is "UNKNOWN". Known values are: "UNKNOWN", "PENDING",
- "RUNNING", "ERROR", and "SUCCESS".
+ "cors": {
+ "allow_credentials": bool, #
+ Optional. Whether browsers should expose the response to the
+ client-side JavaScript code when the request"u2019s
+ credentials mode is include. This configures the
+ ``Access-Control-Allow-Credentials`` header.
+ "allow_headers": [
+ "str" # Optional. The set of
+ allowed HTTP request headers. This configures the
+ ``Access-Control-Allow-Headers`` header.
+ ],
+ "allow_methods": [
+ "str" # Optional. The set of
+ allowed HTTP methods. This configures the
+ ``Access-Control-Allow-Methods`` header.
+ ],
+ "allow_origins": [
+ {
+ "exact": "str", #
+ Optional. Exact string match. Only 1 of ``exact``"" ,
+ ``prefix``"" , or ``regex`` must be set.
+ "prefix": "str", #
+ Optional. Prefix-based match. Only 1 of ``exact``"" ,
+ ``prefix``"" , or ``regex`` must be set.
+ "regex": "str" #
+ Optional. RE2 style regex-based match. Only 1 of
+ ``exact``"" , ``prefix``"" , or ``regex`` must be
+ set. For more information about RE2 syntax, see:
+ https://github.com/google/re2/wiki/Syntax.
+ }
+ ],
+ "expose_headers": [
+ "str" # Optional. The set of
+ HTTP response headers that browsers are allowed to
+ access. This configures the
+ ``Access-Control-Expose-Headers`` header.
+ ],
+ "max_age": "str" # Optional. An
+ optional duration specifying how long browsers can cache the
+ results of a preflight request. This configures the
+ ``Access-Control-Max-Age`` header.
+ },
+ "match": {
+ "authority": {
+ "exact": "str" # Required.
+ },
+ "path": {
+ "prefix": "str" #
+ Prefix-based match. For example, ``/api`` will match
+ ``/api``"" , ``/api/``"" , and any nested paths such as
+ ``/api/v1/endpoint``. Required.
+ }
+ },
+ "redirect": {
+ "authority": "str", # Optional. The
+ authority/host to redirect to. This can be a hostname or IP
+ address. Note: use ``port`` to set the port.
+ "port": 0, # Optional. The port to
+ redirect to.
+ "redirect_code": 0, # Optional. The
+ redirect code to use. Defaults to ``302``. Supported values
+ are 300, 301, 302, 303, 304, 307, 308.
+ "scheme": "str", # Optional. The
+ scheme to redirect to. Supported values are ``http`` or
+ ``https``. Default: ``https``.
+ "uri": "str" # Optional. An optional
+ URI path to redirect to. Note: if this is specified the whole
+ URI of the original request will be overwritten to this
+ value, irrespective of the original request URI being
+ matched.
+ }
}
]
},
- "slack_webhooks": [
+ "jobs": [
{
- "channel": "str", # Optional. Name of the Slack
- Webhook Channel.
- "url": "str" # Optional. URL of the Slack webhook.
- }
- ],
- "spec": {
- "disabled": bool, # Optional. Is the alert disabled?.
- "operator": "UNSPECIFIED_OPERATOR", # Optional. Default
- value is "UNSPECIFIED_OPERATOR". Known values are:
- "UNSPECIFIED_OPERATOR", "GREATER_THAN", and "LESS_THAN".
- "rule": "UNSPECIFIED_RULE", # Optional. Default value is
- "UNSPECIFIED_RULE". Known values are: "UNSPECIFIED_RULE",
- "CPU_UTILIZATION", "MEM_UTILIZATION", "RESTART_COUNT",
- "DEPLOYMENT_FAILED", "DEPLOYMENT_LIVE", "DOMAIN_FAILED", "DOMAIN_LIVE",
- "AUTOSCALE_FAILED", "AUTOSCALE_SUCCEEDED", "FUNCTIONS_ACTIVATION_COUNT",
- "FUNCTIONS_AVERAGE_DURATION_MS", "FUNCTIONS_ERROR_RATE_PER_MINUTE",
- "FUNCTIONS_AVERAGE_WAIT_TIME_MS", "FUNCTIONS_ERROR_COUNT", and
- "FUNCTIONS_GB_RATE_PER_SECOND".
- "value": 0.0, # Optional. Threshold value for alert.
- "window": "UNSPECIFIED_WINDOW" # Optional. Default value is
- "UNSPECIFIED_WINDOW". Known values are: "UNSPECIFIED_WINDOW",
- "FIVE_MINUTES", "TEN_MINUTES", "THIRTY_MINUTES", and "ONE_HOUR".
- }
- }
- }
- # response body for status code(s): 404
- response == {
- "id": "str", # A short identifier corresponding to the HTTP status code
- returned. For example, the ID for a response returning a 404 status code would
- be "not_found.". Required.
- "message": "str", # A message providing additional information about the
- error, including details to help resolve it when possible. Required.
- "request_id": "str" # Optional. Optionally, some endpoints may include a
- request ID that should be provided when reporting bugs or opening support
- tickets to help identify the issue.
- }
- """
- error_map: MutableMapping[int, Type[HttpResponseError]] = {
- 404: ResourceNotFoundError,
- 409: ResourceExistsError,
- 304: ResourceNotModifiedError,
- 401: cast(
- Type[HttpResponseError],
- lambda response: ClientAuthenticationError(response=response),
- ),
- 429: HttpResponseError,
- 500: HttpResponseError,
- }
- error_map.update(kwargs.pop("error_map", {}) or {})
-
- _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
- _params = kwargs.pop("params", {}) or {}
-
- content_type: Optional[str] = kwargs.pop(
- "content_type", _headers.pop("Content-Type", None)
- )
- cls: ClsType[JSON] = kwargs.pop("cls", None)
-
- content_type = content_type or "application/json"
- _json = None
- _content = None
- if isinstance(body, (IOBase, bytes)):
- _content = body
- else:
- _json = body
-
- _request = build_apps_assign_alert_destinations_request(
- app_id=app_id,
- alert_id=alert_id,
- content_type=content_type,
- json=_json,
- content=_content,
- headers=_headers,
- params=_params,
- )
- _request.url = self._client.format_url(_request.url)
-
- _stream = False
- pipeline_response: PipelineResponse = (
- self._client._pipeline.run( # pylint: disable=protected-access
- _request, stream=_stream, **kwargs
- )
- )
-
- response = pipeline_response.http_response
-
- if response.status_code not in [200, 404]:
- if _stream:
- response.read() # Load the body in memory and close the socket
- map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore
- raise HttpResponseError(response=response)
-
- response_headers = {}
- if response.status_code == 200:
- response_headers["ratelimit-limit"] = self._deserialize(
- "int", response.headers.get("ratelimit-limit")
- )
- response_headers["ratelimit-remaining"] = self._deserialize(
- "int", response.headers.get("ratelimit-remaining")
- )
- response_headers["ratelimit-reset"] = self._deserialize(
- "int", response.headers.get("ratelimit-reset")
- )
-
- if response.content:
- deserialized = response.json()
- else:
- deserialized = None
-
- if response.status_code == 404:
- response_headers["ratelimit-limit"] = self._deserialize(
- "int", response.headers.get("ratelimit-limit")
- )
- response_headers["ratelimit-remaining"] = self._deserialize(
- "int", response.headers.get("ratelimit-remaining")
- )
- response_headers["ratelimit-reset"] = self._deserialize(
- "int", response.headers.get("ratelimit-reset")
- )
-
- if response.content:
- deserialized = response.json()
- else:
- deserialized = None
-
- if cls:
- return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore
-
- return cast(JSON, deserialized) # type: ignore
-
- @overload
- def create_rollback(
- self,
- app_id: str,
- body: JSON,
- *,
- content_type: str = "application/json",
- **kwargs: Any,
- ) -> JSON:
- # pylint: disable=line-too-long
- """Rollback App.
-
- Rollback an app to a previous deployment. A new deployment will be created to perform the
- rollback.
- The app will be pinned to the rollback deployment preventing any new deployments from being
- created,
- either manually or through Auto Deploy on Push webhooks. To resume deployments, the rollback
- must be
- either committed or reverted.
-
- It is recommended to use the Validate App Rollback endpoint to double check if the rollback is
- valid and if there are any warnings.
-
- :param app_id: The app ID. Required.
- :type app_id: str
- :param body: Required.
- :type body: JSON
- :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
- Default value is "application/json".
- :paramtype content_type: str
- :return: JSON object
- :rtype: JSON
- :raises ~azure.core.exceptions.HttpResponseError:
-
- Example:
- .. code-block:: python
-
- # JSON input template you can fill out and use as your body input.
- body = {
- "deployment_id": "str", # Optional. The ID of the deployment to rollback to.
- "skip_pin": bool # Optional. Whether to skip pinning the rollback
- deployment. If false, the rollback deployment will be pinned and any new
- deployments including Auto Deploy on Push hooks will be disabled until the
- rollback is either manually committed or reverted via the CommitAppRollback or
- RevertAppRollback endpoints respectively. If true, the rollback will be
- immediately committed and the app will remain unpinned.
- }
-
- # response body for status code(s): 200
- response == {
- "deployment": {
- "cause": "str", # Optional. What caused this deployment to be
- created.
- "cloned_from": "str", # Optional. The ID of a previous deployment
- that this deployment was cloned from.
- "created_at": "2020-02-20 00:00:00", # Optional. The creation time
- of the deployment.
- "functions": [
- {
- "name": "str", # Optional. The name of this
- functions component.
- "namespace": "str", # Optional. The namespace where
- the functions are deployed.
- "source_commit_hash": "str" # Optional. The commit
- hash of the repository that was used to build this functions
- component.
- }
- ],
- "id": "str", # Optional. The ID of the deployment.
- "jobs": [
- {
- "name": "str", # Optional. The name of this job.
- "source_commit_hash": "str" # Optional. The commit
- hash of the repository that was used to build this job.
- }
- ],
- "phase": "UNKNOWN", # Optional. Default value is "UNKNOWN". Known
- values are: "UNKNOWN", "PENDING_BUILD", "BUILDING", "PENDING_DEPLOY",
- "DEPLOYING", "ACTIVE", "SUPERSEDED", "ERROR", and "CANCELED".
- "phase_last_updated_at": "2020-02-20 00:00:00", # Optional. When the
- deployment phase was last updated.
- "progress": {
- "error_steps": 0, # Optional. Number of unsuccessful steps.
- "pending_steps": 0, # Optional. Number of pending steps.
- "running_steps": 0, # Optional. Number of currently running
- steps.
- "steps": [
- {
- "component_name": "str", # Optional. The
- component name that this step is associated with.
- "ended_at": "2020-02-20 00:00:00", #
- Optional. The end time of this step.
- "message_base": "str", # Optional. The base
- of a human-readable description of the step intended to be
- combined with the component name for presentation. For example:
- ``message_base`` = "Building service" ``component_name`` = "api".
- "name": "str", # Optional. The name of this
- step.
- "reason": {
- "code": "str", # Optional. The error
- code.
- "message": "str" # Optional. The
- error message.
+ "autoscaling": {
+ "max_instance_count": 0, # Optional. The
+ maximum amount of instances for this component. Must be more than
+ min_instance_count.
+ "metrics": {
+ "cpu": {
+ "percent": 80 # Optional.
+ Default value is 80. The average target CPU utilization
+ for the component.
+ }
},
- "started_at": "2020-02-20 00:00:00", #
- Optional. The start time of this step.
- "status": "UNKNOWN", # Optional. Default
- value is "UNKNOWN". Known values are: "UNKNOWN", "PENDING",
- "RUNNING", "ERROR", and "SUCCESS".
- "steps": [
- {} # Optional. Child steps of this
- step.
- ]
- }
- ],
- "success_steps": 0, # Optional. Number of successful steps.
- "summary_steps": [
- {
- "component_name": "str", # Optional. The
- component name that this step is associated with.
- "ended_at": "2020-02-20 00:00:00", #
- Optional. The end time of this step.
- "message_base": "str", # Optional. The base
- of a human-readable description of the step intended to be
- combined with the component name for presentation. For example:
- ``message_base`` = "Building service" ``component_name`` = "api".
- "name": "str", # Optional. The name of this
- step.
- "reason": {
- "code": "str", # Optional. The error
- code.
- "message": "str" # Optional. The
- error message.
+ "min_instance_count": 0 # Optional. The
+ minimum amount of instances for this component. Must be less than
+ max_instance_count.
+ },
+ "bitbucket": {
+ "branch": "str", # Optional. The name of the
+ branch to use.
+ "deploy_on_push": bool, # Optional. Whether
+ to automatically deploy new commits made to the repo.
+ "repo": "str" # Optional. The name of the
+ repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
+ },
+ "build_command": "str", # Optional. An optional
+ build command to run while building this component from source.
+ "dockerfile_path": "str", # Optional. The path to
+ the Dockerfile relative to the root of the repo. If set, it will be
+ used to build this component. Otherwise, App Platform will attempt to
+ build it using buildpacks.
+ "environment_slug": "str", # Optional. An
+ environment slug describing the type of this app. For a full list,
+ please refer to `the product documentation
+ `_.
+ "envs": [
+ {
+ "key": "str", # The variable name.
+ Required.
+ "scope": "RUN_AND_BUILD_TIME", #
+ Optional. Default value is "RUN_AND_BUILD_TIME". * RUN_TIME:
+ Made available only at run-time * BUILD_TIME: Made available
+ only at build-time * RUN_AND_BUILD_TIME: Made available at
+ both build and run-time. Known values are: "UNSET",
+ "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME".
+ "type": "GENERAL", # Optional.
+ Default value is "GENERAL". * GENERAL: A plain-text
+ environment variable * SECRET: A secret encrypted environment
+ variable. Known values are: "GENERAL" and "SECRET".
+ "value": "str" # Optional. The
+ value. If the type is ``SECRET``"" , the value will be
+ encrypted on first submission. On following submissions, the
+ encrypted value should be used.
+ }
+ ],
+ "git": {
+ "branch": "str", # Optional. The name of the
+ branch to use.
+ "repo_clone_url": "str" # Optional. The
+ clone URL of the repo. Example:
+ ``https://github.com/digitalocean/sample-golang.git``.
+ },
+ "github": {
+ "branch": "str", # Optional. The name of the
+ branch to use.
+ "deploy_on_push": bool, # Optional. Whether
+ to automatically deploy new commits made to the repo.
+ "repo": "str" # Optional. The name of the
+ repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
+ },
+ "gitlab": {
+ "branch": "str", # Optional. The name of the
+ branch to use.
+ "deploy_on_push": bool, # Optional. Whether
+ to automatically deploy new commits made to the repo.
+ "repo": "str" # Optional. The name of the
+ repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
+ },
+ "image": {
+ "deploy_on_push": {
+ "enabled": bool # Optional. Whether
+ to automatically deploy new images. Can only be used for
+ images hosted in DOCR and can only be used with an image tag,
+ not a specific digest.
},
- "started_at": "2020-02-20 00:00:00", #
- Optional. The start time of this step.
- "status": "UNKNOWN", # Optional. Default
- value is "UNKNOWN". Known values are: "UNKNOWN", "PENDING",
- "RUNNING", "ERROR", and "SUCCESS".
- "steps": [
- {} # Optional. Child steps of this
- step.
- ]
+ "digest": "str", # Optional. The image
+ digest. Cannot be specified if tag is provided.
+ "registry": "str", # Optional. The registry
+ name. Must be left empty for the ``DOCR`` registry type.
+ "registry_credentials": "str", # Optional.
+ The credentials to be able to pull the image. The value will be
+ encrypted on first submission. On following submissions, the
+ encrypted value should be used. * "$username:$access_token" for
+ registries of type ``DOCKER_HUB``. * "$username:$access_token"
+ for registries of type ``GHCR``.
+ "registry_type": "str", # Optional. *
+ DOCKER_HUB: The DockerHub container registry type. * DOCR: The
+ DigitalOcean container registry type. * GHCR: The Github
+ container registry type. Known values are: "DOCKER_HUB", "DOCR",
+ and "GHCR".
+ "repository": "str", # Optional. The
+ repository name.
+ "tag": "latest" # Optional. Default value is
+ "latest". The repository tag. Defaults to ``latest`` if not
+ provided and no digest is provided. Cannot be specified if digest
+ is provided.
+ },
+ "instance_count": 1, # Optional. Default value is 1.
+ The amount of instances that this component should be scaled to.
+ Default: 1. Must not be set if autoscaling is used.
+ "instance_size_slug": {},
+ "kind": "UNSPECIFIED", # Optional. Default value is
+ "UNSPECIFIED". * UNSPECIFIED: Default job type, will auto-complete to
+ POST_DEPLOY kind. * PRE_DEPLOY: Indicates a job that runs before an
+ app deployment. * POST_DEPLOY: Indicates a job that runs after an app
+ deployment. * FAILED_DEPLOY: Indicates a job that runs after a
+ component fails to deploy. Known values are: "UNSPECIFIED",
+ "PRE_DEPLOY", "POST_DEPLOY", and "FAILED_DEPLOY".
+ "log_destinations": [
+ {
+ "name": "str", # Required.
+ "datadog": {
+ "api_key": "str", # Datadog
+ API key. Required.
+ "endpoint": "str" #
+ Optional. Datadog HTTP log intake endpoint.
+ },
+ "logtail": {
+ "token": "str" # Optional.
+ Logtail token.
+ },
+ "open_search": {
+ "basic_auth": {
+ "password": "str", #
+ Optional. Password for user defined in User. Is
+ required when ``endpoint`` is set. Cannot be set if
+ using a DigitalOcean DBaaS OpenSearch cluster.
+ "user": "str" #
+ Optional. Username to authenticate with. Only
+ required when ``endpoint`` is set. Defaults to
+ ``doadmin`` when ``cluster_name`` is set.
+ },
+ "cluster_name": "str", #
+ Optional. The name of a DigitalOcean DBaaS OpenSearch
+ cluster to use as a log forwarding destination. Cannot be
+ specified if ``endpoint`` is also specified.
+ "endpoint": "str", #
+ Optional. OpenSearch API Endpoint. Only HTTPS is
+ supported. Format: https://:code:``::code:``.
+ Cannot be specified if ``cluster_name`` is also
+ specified.
+ "index_name": "logs" #
+ Optional. Default value is "logs". The index name to use
+ for the logs. If not set, the default index name is
+ "logs".
+ },
+ "papertrail": {
+ "endpoint": "str" #
+ Papertrail syslog endpoint. Required.
+ }
+ }
+ ],
+ "name": "str", # Optional. The name. Must be unique
+ across all components within the same app.
+ "run_command": "str", # Optional. An optional run
+ command to override the component's default.
+ "source_dir": "str", # Optional. An optional path to
+ the working directory to use for the build. For Dockerfile builds,
+ this will be used as the build context. Must be relative to the root
+ of the repo.
+ "termination": {
+ "grace_period_seconds": 0 # Optional. The
+ number of seconds to wait between sending a TERM signal to a
+ container and issuing a KILL which causes immediate shutdown.
+ (Default 120).
}
- ],
- "total_steps": 0 # Optional. Total number of steps.
+ }
+ ],
+ "maintenance": {
+ "archive": bool, # Optional. Indicates whether the app
+ should be archived. Setting this to true implies that enabled is set to
+ true.
+ "enabled": bool, # Optional. Indicates whether maintenance
+ mode should be enabled for the app.
+ "offline_page_url": "str" # Optional. A custom offline page
+ to display when maintenance mode is enabled or the app is archived.
},
+ "region": "str", # Optional. The slug form of the geographical
+ origin of the app. Default: ``nearest available``. Known values are: "atl",
+ "nyc", "sfo", "tor", "ams", "fra", "lon", "blr", "sgp", and "syd".
"services": [
{
- "name": "str", # Optional. The name of this service.
- "source_commit_hash": "str" # Optional. The commit
- hash of the repository that was used to build this service.
- }
- ],
- "spec": {
- "name": "str", # The name of the app. Must be unique across
- all apps in the same account. Required.
- "databases": [
- {
- "name": "str", # The database's name. The
- name must be unique across all components within the same app and
- cannot use capital letters. Required.
- "cluster_name": "str", # Optional. The name
- of the underlying DigitalOcean DBaaS cluster. This is required
- for production databases. For dev databases, if cluster_name is
- not set, a new cluster will be provisioned.
- "db_name": "str", # Optional. The name of
- the MySQL or PostgreSQL database to configure.
- "db_user": "str", # Optional. The name of
- the MySQL or PostgreSQL user to configure.
- "engine": "UNSET", # Optional. Default value
- is "UNSET". * MYSQL: MySQL * PG: PostgreSQL * REDIS: Caching *
- MONGODB: MongoDB * KAFKA: Kafka * OPENSEARCH: OpenSearch *
- VALKEY: ValKey. Known values are: "UNSET", "MYSQL", "PG",
- "REDIS", "MONGODB", "KAFKA", "OPENSEARCH", and "VALKEY".
- "production": bool, # Optional. Whether this
- is a production or dev database.
- "version": "str" # Optional. The version of
- the database engine.
- }
- ],
- "disable_edge_cache": False, # Optional. Default value is
- False. .. role:: raw-html-m2r(raw) :format: html If set to
- ``true``"" , the app will **not** be cached at the edge (CDN). Enable
- this option if you want to manage CDN configuration yourself"u2014whether
- by using an external CDN provider or by handling static content and
- caching within your app. This setting is also recommended for apps that
- require real-time data or serve dynamic content, such as those using
- Server-Sent Events (SSE) over GET, or hosting an MCP (Model Context
- Protocol) Server that utilizes SSE."" :raw-html-m2r:`
` **Note:** This
- feature is not available for static site components.""
- :raw-html-m2r:`
` For more information, see `Disable CDN Cache
- `_.
- "disable_email_obfuscation": False, # Optional. Default
- value is False. If set to ``true``"" , email addresses in the app will
- not be obfuscated. This is useful for apps that require email addresses
- to be visible (in the HTML markup).
- "domains": [
- {
- "domain": "str", # The hostname for the
- domain. Required.
- "minimum_tls_version": "str", # Optional.
- The minimum version of TLS a client application can use to access
- resources for the domain. Must be one of the following values
- wrapped within quotations: ``"1.2"`` or ``"1.3"``. Known values
- are: "1.2" and "1.3".
- "type": "UNSPECIFIED", # Optional. Default
- value is "UNSPECIFIED". * DEFAULT: The default
- ``.ondigitalocean.app`` domain assigned to this app * PRIMARY:
- The primary domain for this app that is displayed as the default
- in the control panel, used in bindable environment variables, and
- any other places that reference an app's live URL. Only one
- domain may be set as primary. * ALIAS: A non-primary domain.
- Known values are: "UNSPECIFIED", "DEFAULT", "PRIMARY", and
- "ALIAS".
- "wildcard": bool, # Optional. Indicates
- whether the domain includes all sub-domains, in addition to the
- given domain.
- "zone": "str" # Optional. Optional. If the
- domain uses DigitalOcean DNS and you would like App Platform to
- automatically manage it for you, set this to the name of the
- domain on your account. For example, If the domain you are
- adding is ``app.domain.com``"" , the zone could be
- ``domain.com``.
- }
- ],
- "egress": {
- "type": "AUTOASSIGN" # Optional. Default value is
- "AUTOASSIGN". The app egress type. Known values are: "AUTOASSIGN" and
- "DEDICATED_IP".
- },
- "enhanced_threat_control_enabled": False, # Optional.
- Default value is False. If set to ``true``"" , suspicious requests will
- go through additional security checks to help mitigate layer 7 DDoS
- attacks.
- "functions": [
- {
- "name": "str", # The name. Must be unique
- across all components within the same app. Required.
- "alerts": [
- {
- "disabled": bool, #
- Optional. Is the alert disabled?.
- "operator":
- "UNSPECIFIED_OPERATOR", # Optional. Default value is
- "UNSPECIFIED_OPERATOR". Known values are:
- "UNSPECIFIED_OPERATOR", "GREATER_THAN", and "LESS_THAN".
- "rule": "UNSPECIFIED_RULE",
- # Optional. Default value is "UNSPECIFIED_RULE". Known
- values are: "UNSPECIFIED_RULE", "CPU_UTILIZATION",
- "MEM_UTILIZATION", "RESTART_COUNT", "DEPLOYMENT_FAILED",
- "DEPLOYMENT_LIVE", "DOMAIN_FAILED", "DOMAIN_LIVE",
- "AUTOSCALE_FAILED", "AUTOSCALE_SUCCEEDED",
- "FUNCTIONS_ACTIVATION_COUNT",
- "FUNCTIONS_AVERAGE_DURATION_MS",
- "FUNCTIONS_ERROR_RATE_PER_MINUTE",
- "FUNCTIONS_AVERAGE_WAIT_TIME_MS",
- "FUNCTIONS_ERROR_COUNT", and
- "FUNCTIONS_GB_RATE_PER_SECOND".
- "value": 0.0, # Optional.
- Threshold value for alert.
- "window":
- "UNSPECIFIED_WINDOW" # Optional. Default value is
- "UNSPECIFIED_WINDOW". Known values are:
- "UNSPECIFIED_WINDOW", "FIVE_MINUTES", "TEN_MINUTES",
- "THIRTY_MINUTES", and "ONE_HOUR".
+ "autoscaling": {
+ "max_instance_count": 0, # Optional. The
+ maximum amount of instances for this component. Must be more than
+ min_instance_count.
+ "metrics": {
+ "cpu": {
+ "percent": 80 # Optional.
+ Default value is 80. The average target CPU utilization
+ for the component.
}
- ],
- "bitbucket": {
- "branch": "str", # Optional. The
- name of the branch to use.
- "deploy_on_push": bool, # Optional.
- Whether to automatically deploy new commits made to the repo.
- "repo": "str" # Optional. The name
- of the repo in the format owner/repo. Example:
- ``digitalocean/sample-golang``.
- },
- "cors": {
- "allow_credentials": bool, #
- Optional. Whether browsers should expose the response to the
- client-side JavaScript code when the request"u2019s
- credentials mode is include. This configures the
- ``Access-Control-Allow-Credentials`` header.
- "allow_headers": [
- "str" # Optional. The set of
- allowed HTTP request headers. This configures the
- ``Access-Control-Allow-Headers`` header.
- ],
- "allow_methods": [
- "str" # Optional. The set of
- allowed HTTP methods. This configures the
- ``Access-Control-Allow-Methods`` header.
- ],
- "allow_origins": [
- {
- "exact": "str", #
- Optional. Exact string match. Only 1 of ``exact``"" ,
- ``prefix``"" , or ``regex`` must be set.
- "prefix": "str", #
- Optional. Prefix-based match. Only 1 of ``exact``"" ,
- ``prefix``"" , or ``regex`` must be set.
- "regex": "str" #
- Optional. RE2 style regex-based match. Only 1 of
- ``exact``"" , ``prefix``"" , or ``regex`` must be
- set. For more information about RE2 syntax, see:
- https://github.com/google/re2/wiki/Syntax.
- }
- ],
- "expose_headers": [
- "str" # Optional. The set of
- HTTP response headers that browsers are allowed to
- access. This configures the
- ``Access-Control-Expose-Headers`` header.
- ],
- "max_age": "str" # Optional. An
- optional duration specifying how long browsers can cache the
- results of a preflight request. This configures the
- ``Access-Control-Max-Age`` header.
},
- "envs": [
- {
- "key": "str", # The variable
- name. Required.
- "scope":
- "RUN_AND_BUILD_TIME", # Optional. Default value is
- "RUN_AND_BUILD_TIME". * RUN_TIME: Made available only at
- run-time * BUILD_TIME: Made available only at build-time
- * RUN_AND_BUILD_TIME: Made available at both build and
- run-time. Known values are: "UNSET", "RUN_TIME",
- "BUILD_TIME", and "RUN_AND_BUILD_TIME".
- "type": "GENERAL", #
- Optional. Default value is "GENERAL". * GENERAL: A
- plain-text environment variable * SECRET: A secret
- encrypted environment variable. Known values are:
- "GENERAL" and "SECRET".
- "value": "str" # Optional.
- The value. If the type is ``SECRET``"" , the value will
- be encrypted on first submission. On following
- submissions, the encrypted value should be used.
- }
+ "min_instance_count": 0 # Optional. The
+ minimum amount of instances for this component. Must be less than
+ max_instance_count.
+ },
+ "bitbucket": {
+ "branch": "str", # Optional. The name of the
+ branch to use.
+ "deploy_on_push": bool, # Optional. Whether
+ to automatically deploy new commits made to the repo.
+ "repo": "str" # Optional. The name of the
+ repo in the format owner/repo. Example:
+ ``digitalocean/sample-golang``.
+ },
+ "build_command": "str", # Optional. An optional
+ build command to run while building this component from source.
+ "cors": {
+ "allow_credentials": bool, # Optional.
+ Whether browsers should expose the response to the client-side
+                         JavaScript code when the request's credentials mode is
+ include. This configures the ``Access-Control-Allow-Credentials``
+ header.
+ "allow_headers": [
+ "str" # Optional. The set of allowed
+ HTTP request headers. This configures the
+ ``Access-Control-Allow-Headers`` header.
],
- "git": {
- "branch": "str", # Optional. The
- name of the branch to use.
- "repo_clone_url": "str" # Optional.
- The clone URL of the repo. Example:
- ``https://github.com/digitalocean/sample-golang.git``.
- },
- "github": {
- "branch": "str", # Optional. The
- name of the branch to use.
- "deploy_on_push": bool, # Optional.
- Whether to automatically deploy new commits made to the repo.
- "repo": "str" # Optional. The name
- of the repo in the format owner/repo. Example:
- ``digitalocean/sample-golang``.
- },
- "gitlab": {
- "branch": "str", # Optional. The
- name of the branch to use.
- "deploy_on_push": bool, # Optional.
- Whether to automatically deploy new commits made to the repo.
- "repo": "str" # Optional. The name
- of the repo in the format owner/repo. Example:
- ``digitalocean/sample-golang``.
- },
- "log_destinations": [
- {
- "name": "str", # Required.
- "datadog": {
- "api_key": "str", #
- Datadog API key. Required.
- "endpoint": "str" #
- Optional. Datadog HTTP log intake endpoint.
- },
- "logtail": {
- "token": "str" #
- Optional. Logtail token.
- },
- "open_search": {
- "basic_auth": {
- "password":
- "str", # Optional. Password for user defined in
- User. Is required when ``endpoint`` is set.
- Cannot be set if using a DigitalOcean DBaaS
- OpenSearch cluster.
- "user": "str"
- # Optional. Username to authenticate with. Only
- required when ``endpoint`` is set. Defaults to
- ``doadmin`` when ``cluster_name`` is set.
- },
- "cluster_name":
- "str", # Optional. The name of a DigitalOcean DBaaS
- OpenSearch cluster to use as a log forwarding
- destination. Cannot be specified if ``endpoint`` is
- also specified.
- "endpoint": "str", #
- Optional. OpenSearch API Endpoint. Only HTTPS is
- supported. Format:
- https://:code:``::code:`